Commit

v0.6.1 Revised UI, added GEE, glmmrbase 0.11.3
samuel-watson committed Dec 20, 2024
1 parent dc17c90 commit 58373a4
Showing 35 changed files with 2,357 additions and 1,055 deletions.
5 changes: 2 additions & 3 deletions Makefile
@@ -12,7 +12,7 @@ SOURCES += $(IMGUI_DIR)/imgui.cpp $(IMGUI_DIR)/imgui_draw.cpp $(IMGUI_DIR)/imgui
SOURCES += ./src/mainmenu.cpp ./src/designer.cpp ./src/samplesize.cpp ./src/model.cpp
SOURCES += ./src/results.cpp ./src/optimiser.cpp ./src/plotter.cpp
SOURCES += ./src/sequenceperiod.cpp ./src/multiColorButton.cpp ./src/design.cpp
SOURCES += ./src/statisticalmodel.cpp ./src/glmmmodel.cpp ./src/modelupdater.cpp ./src/plotdata.cpp
SOURCES += ./src/statisticalmodel.cpp ./src/menubar.cpp ./src/glmmmodel.cpp ./src/modelupdater.cpp ./src/plotdata.cpp
SOURCES += ./src/modelchecker.cpp ./src/krigingdata.cpp ./src/krigger.cpp ./src/datasimulate.cpp
SOURCES += ./src/applog.cpp ./src/logger.cpp
SOURCES += $(IMPLOT_DIR)/implot.cpp $(IMPLOT_DIR)/implot_items.cpp
@@ -30,5 +30,4 @@ all: $(SOURCES) $(OUTPUT)
$(OUTPUT): $(SOURCES)
$(CXX) $(SOURCES) -std=c++20 -o $(OUTPUT) $(LIBS) $(WEBGL_VER) -Os -g2 -fno-math-errno -s NO_DISABLE_EXCEPTION_CATCHING --preload-file data $(USE_WASM) -I$(IMGUI_DIR) -I$(IMGUI_DIR)/backends $(LDFLAGS) $(CPPFLAGS)
clean:
rm -f $(OUTPUT)

rm -f $(OUTPUT)
2 changes: 2 additions & 0 deletions include/clusterapp.h
@@ -25,6 +25,8 @@ namespace ClusterApp {

void RenderModel(ClusterApp::design& design, ClusterApp::statisticalModel& model, ClusterApp::options& option);

void RenderMenuBar(ClusterApp::design& design, ClusterApp::statisticalModel& model,ClusterApp::modelChecker& checker, ClusterApp::options& option);

void RenderResults(ClusterApp::modelChecker& checker, ClusterApp::options& option);

void RenderOptimiser(ClusterApp::design& design, ClusterApp::modelUpdater& updater, ClusterApp::modelSummary& summary, ClusterApp::options& option);
17 changes: 16 additions & 1 deletion include/clusterclasses.h
@@ -124,7 +124,7 @@ namespace ClusterApp {
void update_beta(ClusterApp::design& design);
void set_beta_random(const double m, const double s);
float alpha = 0.05;
float quantile = 0.5;
//float quantile = 0.5;
};

struct modelSummary {
@@ -135,14 +135,19 @@
double power_bw = 0;
double power_box = 0;
double power_sat = 0;
double power_gee_indep = 0;
double power_gee_indep_bw = 0;
double ci_width = 0;
double ci_width_kr = 0;
double ci_width_kr2 = 0;
double ci_width_bw = 0;
double ci_width_sat = 0;
double ci_width_gee_indep = 0;
double ci_width_gee_indep_bw = 0;
double se = 1;
double se_kr = 1;
double se_kr2 = 1;
double se_gee_indep = 1;
double dof = 1;
double dof_kr = 1;
double dof_bw = 1;
Expand All @@ -155,14 +160,19 @@ namespace ClusterApp {
double power_bw_2 = 0;
double power_box_2 = 0;
double power_sat_2 = 0;
double power_gee_indep_2 = 0;
double power_gee_indep_bw_2 = 0;
double ci_width_2 = 0;
double ci_width_kr_2 = 0;
double ci_width_kr2_2 = 0;
double ci_width_bw_2 = 0;
double ci_width_sat_2 = 0;
double ci_width_gee_indep_2 = 0;
double ci_width_gee_indep_bw_2 = 0;
double se_2 = 1;
double se_kr_2 = 1;
double se_kr2_2 = 1;
double se_gee_indep_2 = 1;
double dof_2 = 1;
double dof_kr_2 = 1;
double dof_bw_2 = 1;
Expand All @@ -174,14 +184,19 @@ namespace ClusterApp {
double power_bw_12 = 0;
double power_box_12 = 0;
double power_sat_12 = 0;
double power_gee_indep_12 = 0;
double power_gee_indep_bw_12 = 0;
double ci_width_12 = 0;
double ci_width_kr_12 = 0;
double ci_width_kr2_12 = 0;
double ci_width_bw_12 = 0;
double ci_width_sat_12 = 0;
double ci_width_gee_indep_12 = 0;
double ci_width_gee_indep_bw_12 = 0;
double se_12 = 1;
double se_kr_12 = 1;
double se_kr2_12 = 1;
double se_gee_indep_12 = 1;
double dof_12 = 1;
double dof_kr_12 = 1;
double dof_bw_12 = 1;
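The fields added above extend modelSummary with GEE results (the _gee_indep suffix presumably denotes an independence working correlation) alongside the existing GLS, Kenward-Roger, Satterthwaite and Box quantities, together with their _bw, _2 and _12 counterparts. As a rough orientation only, and not code from this commit, a power figure of this kind can be computed from an assumed effect size, its standard error and the significance level held in statisticalModel::alpha using a normal approximation; the helper name below is made up for illustration.

#include <boost/math/distributions/normal.hpp>
#include <cmath>

// Hypothetical helper, not part of the app: two-sided power under a normal
// approximation. `effect` is the assumed treatment effect, `se` the standard
// error (e.g. the GEE-independence one), `alpha` the significance level.
inline double approx_power(double effect, double se, double alpha = 0.05) {
    boost::math::normal_distribution<double> N(0.0, 1.0);
    const double z_crit = boost::math::quantile(N, 1.0 - alpha / 2.0); // 1.96 when alpha = 0.05
    const double z = std::abs(effect) / se;
    // Power = P(|T| > z_crit) when T ~ Normal(effect/se, 1).
    return boost::math::cdf(N, z - z_crit) + boost::math::cdf(N, -z - z_crit);
}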
45 changes: 43 additions & 2 deletions include/glmmr.h
@@ -1,5 +1,6 @@
#pragma once

// #include <variant>
#include "glmmr/general.h"
#include "glmmr/maths.h"
#include "glmmr/formula.hpp"
@@ -8,6 +9,46 @@
#include "glmmr/model.hpp"
#include "glmmr/modelbits.hpp"
#include "glmmr/openmpheader.h"
#include "glmmr/nngpcovariance.hpp"

// [[Rcpp::depends(RcppEigen)]]

typedef glmmr::Model<bits > glmm;
typedef glmmr::Model<bits_nngp> glmm_nngp;
typedef glmmr::Model<bits_hsgp > glmm_hsgp;

enum class Type {
GLMM = 0,
GLMM_NNGP = 1,
GLMM_HSGP = 2
};

/*
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
struct glmmrType
{
std::variant<int, Rcpp::XPtr<glmm>, Rcpp::XPtr<glmm_nngp>, Rcpp::XPtr<glmm_hsgp> > ptr;
glmmrType(SEXP xp, Type type) : ptr(0) {
if(type == Type::GLMM){
Rcpp::XPtr<glmm> newptr(xp);
ptr = newptr;
} else if(type== Type::GLMM_NNGP){
Rcpp::XPtr<glmm_nngp> newptr(xp);
ptr = newptr;
} else if(type == Type::GLMM_HSGP){
Rcpp::XPtr<glmm_hsgp> newptr(xp);
ptr = newptr;
}
}
};
using returnType = std::variant<int, double, bool, Eigen::VectorXd, Eigen::ArrayXd, Eigen::MatrixXd,
dblvec, strvec, intvec, VectorMatrix, MatrixMatrix, CorrectionData<glmmr::SE::KR>,
CorrectionData<glmmr::SE::KR2>, CorrectionData<glmmr::SE::KRBoth>,
CorrectionData<glmmr::SE::Sat>, std::vector<Eigen::MatrixXd>, std::pair<double,double>, BoxResults,
std::pair<int,int> >;
*/


typedef glmmr::ModelBits<glmmr::Covariance, glmmr::LinearPredictor> bits;
typedef glmmr::Model<glmmr::ModelBits<glmmr::Covariance, glmmr::LinearPredictor> > glmm;
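The block commented out above carries over, for reference, the type-erasure machinery from the R interface of glmmrbase, in which an Rcpp external pointer can wrap any of the three model types named by the Type enum and results are passed back through a std::variant. For context only, and not part of this commit, the overloaded helper defined there is the standard C++17 idiom for visiting a variant with one lambda per alternative; a self-contained example:

#include <iostream>
#include <variant>

// The C++17 "overloaded" visitor idiom, as in the commented block above.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main() {
    std::variant<int, double> v = 2.5;
    // Dispatch on whichever alternative is active.
    std::visit(overloaded{
        [](int i)    { std::cout << "int: "    << i << "\n"; },
        [](double d) { std::cout << "double: " << d << "\n"; }
    }, v);
}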
9 changes: 7 additions & 2 deletions include/glmmr/calculator.hpp
@@ -1,7 +1,12 @@
#pragma once

#include "general.h"
#include "openmpheader.h"
#include <stack>
#include <boost/math/special_functions/bessel.hpp>
#include <boost/math/special_functions/polygamma.hpp>
#include <boost/math/special_functions/gamma.hpp>
#include <boost/math/special_functions/digamma.hpp>
#include <boost/math/special_functions/erf.hpp>
#include "maths.h"
#include "instructions.h"

enum class CalcDyDx {
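calculator.hpp now pulls in <stack> and the Boost special-function headers (Bessel, polygamma, gamma, digamma, erf), presumably for use by the calculator's instruction set. For orientation only, and not code from this commit, these headers provide free functions such as the following:

#include <boost/math/special_functions/bessel.hpp>
#include <boost/math/special_functions/digamma.hpp>
#include <iostream>

int main() {
    // Digamma at 1 is minus the Euler-Mascheroni constant, about -0.5772.
    std::cout << boost::math::digamma(1.0) << "\n";
    // Modified Bessel function of the second kind, K_{1/2}(1),
    // the kind of term that appears in Matern-type covariances.
    std::cout << boost::math::cyl_bessel_k(0.5, 1.0) << "\n";
}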
112 changes: 90 additions & 22 deletions include/glmmr/covariance.hpp
@@ -1,20 +1,36 @@
#pragma once

#define _USE_MATH_DEFINES

#include "openmpheader.h"
#include "general.h"
#include <sstream>
#include "maths.h"
#include "algo.h"
#include "interpreter.h"
#include "formula.hpp"
#include "sparse.h"
#include "calculator.hpp"
#include "linearpredictor.hpp"

using namespace Eigen;

namespace glmmr {

inline bool validate_fn(const str& fn){
bool not_fn = str_to_covfunc.find(fn) == str_to_covfunc.end();
return not_fn;
}

inline bool is_compact_fn(const CovFunc& fn){
bool compact = false;
if(fn == CovFunc::truncpow2 || fn == CovFunc::truncpow3 || fn == CovFunc::truncpow4 || fn == CovFunc::truncpow20 || fn == CovFunc::truncpow30
|| fn == CovFunc::truncpow40 || fn == CovFunc::cauchy || fn == CovFunc::cauchy3 || fn == CovFunc::cauchy0 || fn == CovFunc::cauchy30) compact = true;
return compact;
}

struct ZNonZero {
int col;
intvec rows;
int xcol;
};

class Covariance {
public:
// objects
@@ -59,15 +75,17 @@ class Covariance {
intvec parameter_fn_index() const;
virtual intvec re_count() const;
virtual sparse ZL_sparse();
virtual sparse Z_sparse() const;
virtual sparse Z_sparse();
strvec parameter_names();
virtual void derivatives(std::vector<MatrixXd>& derivs,int order = 1);
virtual VectorXd log_gradient(const MatrixXd &umat, double& logl);
void linear_predictor_ptr(glmmr::LinearPredictor* ptr);

protected:
// data
std::vector<glmmr::calculator> calc_;
std::vector<std::vector<CovFunc> > fn_;
glmmr::LinearPredictor* linpred_ptr = nullptr;
intvec re_fn_par_link_;
intvec re_count_;
intvec re_order_;
@@ -86,17 +104,24 @@
bool isSparse = true;
sparse matL;
SparseChol spchol;

// functions
void update_parameters_in_calculators();
MatrixXd get_block(int b);
MatrixXd get_chol_block(int b,bool upper = false);
MatrixXd D_builder(int b,bool chol = false,bool upper = false);
void update_ax();
void L_constructor();
void Z_constructor();
MatrixXd D_sparse_builder(bool chol = false,bool upper = false);
bool sparse_initialised = false;
bool use_amd_permute = true;
void update_parameters_in_calculators();
MatrixXd get_block(int b);
MatrixXd get_chol_block(int b,bool upper = false);
MatrixXd D_builder(int b,bool chol = false,bool upper = false);
void update_ax();
void L_constructor();
void Z_constructor();
void Z_updater();
MatrixXd D_sparse_builder(bool chol = false, bool upper = false);
// logical flags
bool sparse_initialised = false;
bool use_amd_permute = true;
public:
bool z_requires_update = false;
protected:
std::vector<ZNonZero> z_nonzero;
};

}
Expand All @@ -113,7 +138,7 @@ inline glmmr::Covariance::Covariance(const str& formula,
inline glmmr::Covariance::Covariance(const glmmr::Formula& form,
const ArrayXXd &data,
const strvec& colnames) :
form_(form), data_(data), colnames_(colnames),
form_(form), data_(data), colnames_(colnames),
Q_(parse()),matZ(), dmat_matrix(max_block_dim(),max_block_dim()),
zquad(max_block_dim()) {
Z_constructor();
@@ -134,7 +159,7 @@ inline glmmr::Covariance::Covariance(const glmmr::Formula& form,
const ArrayXXd &data,
const strvec& colnames,
const dblvec& parameters) :
form_(form), data_(data), colnames_(colnames), parameters_(parameters),
form_(form), data_(data), colnames_(colnames), parameters_(parameters),
Q_(parse()), matZ(), dmat_matrix(max_block_dim(),max_block_dim()),
zquad(max_block_dim()) {
make_sparse();
@@ -146,7 +171,8 @@ inline glmmr::Covariance::Covariance(const str& formula,
const strvec& colnames,
const ArrayXd& parameters) :
form_(formula), data_(data), colnames_(colnames),
parameters_(parameters.data(),parameters.data()+parameters.size()),Q_(parse()), matZ(),
parameters_(parameters.data(),parameters.data()+parameters.size()),
Q_(parse()), matZ(),
dmat_matrix(max_block_dim(),max_block_dim()),
zquad(max_block_dim()) {
make_sparse();
@@ -233,7 +259,14 @@ inline int glmmr::Covariance::parse(){
} else {
auto idxz = std::find(colnames_.begin(),colnames_.end(),form_.z_[i]);
if(idxz == colnames_.end()){
throw std::runtime_error("z variable "+form_.z_[i]+" not in column names");
auto idxzpar = std::find(form_.fe_parameter_names_.begin(),form_.fe_parameter_names_.end(),form_.z_[i]);
if(idxzpar== form_.fe_parameter_names_.end()){
throw std::runtime_error("z variable "+form_.z_[i]+" not in column or parameter names");
} else {
z_requires_update = true;
zcol = idxzpar - form_.fe_parameter_names_.begin();
zcol = -1*(zcol+2);
}
} else {
zcol = idxz - colnames_.begin();
}
@@ -445,6 +478,12 @@ inline void glmmr::Covariance::Z_constructor()
dblvec vals(block_nvar[i]);
dblvec valscomp(block_nvar[i]);
for(int j = 0; j < block_size[i]; j++){
ZNonZero nonzero;
if (z_[i]< -1){
nonzero.col = zcount;
int xidx = -1*(z_[i]+2);
nonzero.xcol = xidx;
}
for(int m = 0; m < block_nvar[i]; m++){
valscomp[m] = re_temp_data_[i][j][m];
}
@@ -453,16 +492,42 @@
vals[m] = data_(k,re_cols_data_[i][j][m]);
}
if(valscomp==vals){
insertval = z_[i]==-1 ? 1.0 : data_(k,z_[i]);
if(z_[i]==-1){
insertval = 1.0;
} else if (z_[i]< -1){
insertval = 999.0;
nonzero.rows.push_back(k);
} else {
insertval = data_(k,z_[i]);
}
matZ.insert(k,zcount,insertval);
}
}
zcount++;
if (z_[i]< -1) z_nonzero.push_back(nonzero);
}
}
re_temp_data_.clear();
}

inline void glmmr::Covariance::linear_predictor_ptr(glmmr::LinearPredictor* ptr){
linpred_ptr = ptr;
if(z_requires_update)Z_updater();
}

inline void glmmr::Covariance::Z_updater(){
if(z_nonzero.size() > 0)z_requires_update = true;
if(z_requires_update){
if(linpred_ptr == nullptr)throw std::runtime_error("Linpred ptr not initialised");
MatrixXd X = linpred_ptr->X();
if(z_nonzero.size() == 0)throw std::runtime_error("No non-zero data");
for(int i = 0; i < z_nonzero.size(); i++){
for(int j = 0; j < z_nonzero[i].rows.size(); j++){
matZ.insert(z_nonzero[i].rows[j],z_nonzero[i].col,X(z_nonzero[i].rows[j],z_nonzero[i].xcol));
}
}
}
}

inline void glmmr::Covariance::update_parameters_in_calculators(){
for(int i = 0; i < B_; i++){
@@ -618,6 +683,7 @@ inline MatrixXd glmmr::Covariance::get_block(int b)

inline MatrixXd glmmr::Covariance::Z()
{
Z_updater();
return sparse_to_dense(matZ,false,true);
}

@@ -696,13 +762,15 @@ inline MatrixXd glmmr::Covariance::D_builder(int b, bool chol, bool upper)

inline sparse glmmr::Covariance::ZL_sparse()
{
Z_updater();
#if defined(R_BUILD) && defined(ENABLE_DEBUG)
Rcpp::Rcout << "\nZL multiplication: Z: " << matZ.m << " x " << matZ.n << "L: " << matL.m << " x " << matL.n;
#endif
return matZ * matL;
}

inline sparse glmmr::Covariance::Z_sparse() const{
inline sparse glmmr::Covariance::Z_sparse() {
Z_updater();
return matZ;
}

@@ -1005,7 +1073,7 @@ inline VectorXd glmmr::Covariance::log_gradient(const MatrixXd &umat, double& lo
size_B_array[b] += -0.5*log(var) -0.5*log(2*M_PI) - 0.5*umat(obs_counter,i)*umat(obs_counter,i)/(var);
}
size_B_array[b] *= 1.0/(double)niter;
for(int i = 0; i < npars; i++) dlogdet_vals[i] += derivs[i](obs_counter,obs_counter) / var;
for(int i = 0; i < npars; i++) dlogdet_vals[i] += derivs[i+1](obs_counter,obs_counter) / var;
} else {
zquad.setZero();
dmat_matrix.block(0,0,blocksize,blocksize) = get_chol_block(b);
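Taken together, the covariance.hpp changes allow a column of the random-effects design matrix Z to be built from a column of the fixed-effects design matrix X, referenced by parameter name, rather than from a data column: parse() sets z_requires_update and encodes the parameter index as a value below -1, Z_constructor() records the affected entries in z_nonzero with a 999.0 placeholder, and Z_updater() overwrites the placeholders from linpred_ptr->X() whenever Z(), Z_sparse() or ZL_sparse() is requested. A minimal standalone sketch of this deferred-fill pattern follows; the names are simplified and are not the library's API.

#include <vector>
#include <Eigen/Dense>

// Record which entries of Z must be sourced from an external matrix X,
// insert placeholders now, and fill the real values once X is available.
struct NonZero { int row; int col; int xcol; };

struct DeferredZ {
    Eigen::MatrixXd Z;                 // holds placeholders until filled
    std::vector<NonZero> pending;      // entries of Z to copy from X
    bool requires_update = false;

    void fill_from(const Eigen::MatrixXd& X) {
        if (!requires_update) return;
        for (const auto& nz : pending)
            Z(nz.row, nz.col) = X(nz.row, nz.xcol);  // replace the placeholder
    }
};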