#Q5: Effort: how hard did you have to work, from not hard at all to very hard
#Q6: Frustration, from not frustrated at all to highly frustrated
tlx_average.perQuestionPrep <- matrix(nrow = 6, ncol = 3, byrow = FALSE)
colnames(tlx_average.perQuestionPrep) <- c("Question","Pen and Paper", "VR")
tlx_average.perQuestion <- as.data.frame(tlx_average.perQuestionPrep)
#set up the domains of the NASA-TLX as well as the drawing tool types
TLX_questions <- c("mental","physical","time","performance","effort","frustration")
drawingtypes <- c("2D","3D")
#save the average values for the individual domains, split by drawing tool, to the data frame
for(q in TLX_questions){
tlx_average.perQuestion[match(q,TLX_questions),1] <- q
for (d in drawingtypes){
print(mean(tlx_data[tlx_data$DrawingType==d,q]))
tlx_average.perQuestion[match(q,TLX_questions),match(d,drawingtypes)+1] <- mean(tlx_data[tlx_data$DrawingType==d,q])
}
}
tlx_average.perQuestionMelted = melt(tlx_average.perQuestion, id.vars = "Question") #prepare the dataframe for the plot
#TODO:
# compute ymin and ymax for 95 percent confidence intervals and find the parameter for adding these intervals to the plot
# https://r-graph-gallery.com/4-barplot-with-error-bar.html
ggplot(data = tlx_average.perQuestionMelted, mapping = aes(x = factor(Question,
levels = c("mental","physical","time","performance","effort","frustration"))
, y = value, fill = variable)) +
geom_col(position = position_dodge())+
labs(title= "Average Values of RAW-TLX Domains",
subtitle = "by the Drawing Tool",x = "domains",y="average ratings")+
scale_fill_manual("Drawing Tool", values = c("Pen and Paper" = "darkcyan", "VR" = "orange"))
#TODO: use the 95 percent interval and not the standard deviation
ggplot(data = tlx_average.perQuestionMelted, mapping = aes(x = Question, y = value, fill = variable)) +
geom_col(position = position_dodge())+geom_errorbar( aes(x=Question, ymin=value-TLX.perQuestion_std_list, ymax=value+TLX.perQuestion_std_list), colour="black",position = position_dodge(width = 0.9))+
scale_fill_manual("Drawing Tool", values = c("Pen and Paper" = "darkcyan", "VR" = "orange"))
ggplot(tlx_data, aes(x = average, y=DrawingType, colour = DrawingType)) + # Change filling color
geom_boxplot()+ theme_classic()+theme( axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.line.y = element_blank(), axis.text.x = element_text(size = 12), axis.title.y =element_blank()
) +labs(x = "average TLX-score")+
scale_color_manual(name = "Drawing Tool",
labels = c("Pen and Paper", "VR"),
values = c("darkcyan", "orange"))
summary(tlx_data[tlx_data$DrawingType=="2D", ]$average)
sd(tlx_data[tlx_data$DrawingType=="2D", ]$average)
summary(tlx_data[tlx_data$DrawingType=="3D", ]$average)
sd(tlx_data[tlx_data$DrawingType=="3D", ]$average)
# base R function for t-test requires a different df format
tlx_data_wide <- tlx_data %>%
select(ID, DrawingType, average) %>%
spread(DrawingType, average)
t.test(tlx_data_wide$`3D`, tlx_data_wide$`2D`, paired=TRUE)
#mental domain
tlx_mental_wide <- tlx_data %>%
select(ID, DrawingType, mental) %>%
spread(DrawingType, mental)
t.test(tlx_mental_wide$`3D`, tlx_mental_wide$`2D`, paired=TRUE)
#physical domain
tlx_physical_wide <- tlx_data %>%
select(ID, DrawingType, physical) %>%
spread(DrawingType, physical)
t.test(tlx_physical_wide$`3D`, tlx_physical_wide$`2D`, paired=TRUE)
#time domain
tlx_time_wide <- tlx_data %>%
select(ID, DrawingType, time) %>%
spread(DrawingType, time)
t.test(tlx_time_wide$`3D`, tlx_time_wide$`2D`, paired=TRUE)
#performance domain - higher value means higher dissatisfaction
tlx_performance_wide <- tlx_data %>%
select(ID, DrawingType, performance) %>%
spread(DrawingType, performance)
t.test(tlx_performance_wide$`3D`, tlx_performance_wide$`2D`, paired=TRUE)
#effort domain
tlx_effort_wide <- tlx_data %>%
select(ID, DrawingType, effort) %>%
spread(DrawingType, effort)
t.test(tlx_effort_wide$`3D`, tlx_effort_wide$`2D`, paired=TRUE)
#frustration domain
tlx_frustration_wide <- tlx_data %>%
select(ID, DrawingType, frustration) %>%
spread(DrawingType, frustration)
t.test(tlx_frustration_wide$`3D`, tlx_frustration_wide$`2D`, paired=TRUE)
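#a compact, hedged alternative to the per-domain blocks above: run the paired t-test for each TLX
#domain in one loop; assumes every participant has exactly one 2D and one 3D row in tlx_data
tlx_2D.rows <- tlx_data[tlx_data$DrawingType == "2D", ]
tlx_3D.rows <- tlx_data[tlx_data$DrawingType == "3D", ]
tlx_3D.rows <- tlx_3D.rows[match(tlx_2D.rows$ID, tlx_3D.rows$ID), ] #align the rows by participant ID
for (q in TLX_questions) {
  cat("\n---", q, "---\n")
  print(t.test(tlx_3D.rows[[q]], tlx_2D.rows[[q]], paired = TRUE))
}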
tlx_2D.prep <- matrix(nrow = participant_count, ncol = 6, byrow = FALSE)
colnames(tlx_2D.prep) <- c("effort","frustration","mental","performance","physical","time")
tlx_2D <- as.data.frame(tlx_2D.prep)
tlx_3D.prep <- matrix(nrow = participant_count, ncol = 6, byrow = FALSE)
colnames(tlx_3D.prep) <- c("effort","frustration","mental","performance","physical","time")
tlx_3D <- as.data.frame(tlx_3D.prep)
for(id in 1:participant_count){
tlx_2D[id,1] <- as.numeric(questionnaire[id,paste("PnP",1, sep = "_")])
tlx_2D[id,2] <- as.numeric(questionnaire[id,paste("PnP",2, sep = "_")])
tlx_2D[id,3] <- as.numeric(questionnaire[id,paste("PnP",3, sep = "_")])
tlx_2D[id,4] <- as.numeric(questionnaire[id,paste("PnP",4, sep = "_")])
tlx_2D[id,5] <- as.numeric(questionnaire[id,paste("PnP",5, sep = "_")])
tlx_2D[id,6] <- as.numeric(questionnaire[id,paste("PnP",6, sep = "_")])
tlx_3D[id,1] <- as.numeric(questionnaire[id,paste("VR",1, sep = "_")])
tlx_3D[id,2] <- as.numeric(questionnaire[id,paste("VR",2, sep = "_")])
tlx_3D[id,3] <- as.numeric(questionnaire[id,paste("VR",3, sep = "_")])
tlx_3D[id,4] <- as.numeric(questionnaire[id,paste("VR",4, sep = "_")])
tlx_3D[id,5] <- as.numeric(questionnaire[id,paste("VR",5, sep = "_")])
tlx_3D[id,6] <- as.numeric(questionnaire[id,paste("VR",6, sep = "_")])
}
pca_2D <- prcomp(tlx_2D, scale = TRUE)
#reverse the signs of the loadings
pca_2D$rotation <- -1*pca_2D$rotation
pca_2D$rotation
#reverse the signs of the scores as well (mirrors the 3D handling below)
pca_2D$x <- -1*pca_2D$x
#display the first six scores
head(pca_2D$x)
#calculate total variance explained by each principal component
pca_2D_explained <- pca_2D$sdev^2 / sum(pca_2D$sdev^2)
#create scree plot
qplot(c(1:6), pca_2D_explained) +
geom_line() +
xlab("Principal Component") +
ylab("Variance Explained") +
ggtitle("Scree Plot") +
ylim(0, 1)
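#small hedged additions to the 2D PCA above: the cumulative share of variance explained
#(to decide how many components to keep) and a biplot, mirroring the 3D analysis below
cumsum(pca_2D_explained)
biplot(pca_2D, scale = 0)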
#Do the same for the 3D sketch maps
pca_3D <- prcomp(tlx_3D, scale = TRUE)
#reverse the signs
pca_3D$rotation <- -1*pca_3D$rotation
#reverse the signs of the scores
pca_3D$x <- -1*pca_3D$x
#display the first six scores
head(pca_3D$x)
#look at the pca result
biplot(pca_3D, scale = 0)
#calculate total variance explained by each principal component
pca_3D_explained <- pca_3D$sdev^2 / sum(pca_3D$sdev^2)
#create scree plot
qplot(c(1:6), pca_3D_explained) +
geom_line() +
xlab("Principal Component") +
ylab("Variance Explained") +
ggtitle("Scree Plot") +
ylim(0, 1)
drawings_2d.import <- read.csv(here("data/Analysis_2D.csv"))# import the csv that contains the analysis data for the 2D drawings
drawings_3d.import <- read.csv(here("data/Analysis_3D.csv")) # import the csv that contains the analysis data for the 3D drawings
duringThesisNotes.import <- read.csv(here("data/DuringThesisNotes.csv"))
# transpose the tables using data.table library
drawings_2d <- transpose(drawings_2d.import)
drawings_3d <- transpose(drawings_3d.import)
duringThesisNotes <- transpose(duringThesisNotes.import)
# and also swap the column and row names for each table
rownames(drawings_2d) <- colnames(drawings_2d.import)
colnames(drawings_2d) <- rownames(drawings_2d.import)
rownames(drawings_3d) <- colnames(drawings_3d.import)
colnames(drawings_3d) <- rownames(drawings_3d.import)
rownames(duringThesisNotes) <- colnames(duringThesisNotes.import)
colnames(duringThesisNotes) <- rownames(duringThesisNotes.import)
# after the transpose, the correct column names are in the first row.
# Use the first row as names and then delete that row for each table
names(drawings_2d)<-drawings_2d[1,]
drawings_2d<-drawings_2d[-1,]
names(drawings_3d)<-drawings_3d[1,]
drawings_3d<-drawings_3d[-1,]
names(duringThesisNotes)<-duringThesisNotes[1,]
duringThesisNotes<-duringThesisNotes[-1,]
#create vectors for saving the sum values of the analysis, with one entry per participant
#TODO: replace these overall values with f-score
allSum2d <- vector(length=participant_count)
allSum3d <- vector(length=participant_count)
#create more vectors for saving only the visibility sum and the correctness sum
all_Visibility2D <- vector(length=participant_count)
all_Z_Visibility2D <- vector(length=participant_count) #thinking about only analyzing the z visibility
all_Correct2D <- vector(length=participant_count)
all_Visibility3D <- vector(length=participant_count)
all_Correct3D <- vector(length=participant_count)
for(id in 1:participant_count){
#counter variables for the loop
sum2d <- 0
sum2Dvisible <- 0
sum2Dcorrect <-0
sum3d <- 0
sum3Dvisible <- 0
sum3Dcorrect <-0
# evaluate the object relations
for(i in 4:129){
# check how many are correct in 2D
sum2d <- sum2d + as.numeric(drawings_2d[id,i])
# check how many are correct in 3D
sum3d <- sum3d + as.numeric(drawings_3d[id,i])
# compare only the visibility
if ( substr(colnames(drawings_2d)[i],1, 1) == "V" ){
sum2Dvisible <- sum2Dvisible + as.numeric(drawings_2d[id,i])
sum3Dvisible <- sum3Dvisible + as.numeric(drawings_3d[id,i])
}
# compare only the correctness
#TODO: replace the absolute correctness with relative values depending on the visibility, or something similar
if ( substr(colnames(drawings_2d)[i],1, 1) == "C" ){
sum2Dcorrect <- sum2Dcorrect + as.numeric(drawings_2d[id,i])
sum3Dcorrect <- sum3Dcorrect + as.numeric(drawings_3d[id,i])
}
}
#save the values to the vectors
allSum2d[id]<-sum2d
allSum3d[id]<-sum3d
all_Visibility2D[id]<-sum2Dvisible
all_Visibility3D[id]<-sum3Dvisible
all_Correct2D[id]<-sum2Dcorrect
all_Correct3D[id]<-sum3Dcorrect
}
#the number of relations that were analysed:
# 7 objects were compared pairwise (21 pairs) in three dimensions, resulting in 63 relations
totalNumberOfRelations <- 63
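#quick check of that count: pairs of 7 objects times 3 dimensions
choose(7, 2) * 3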
users_data.prep <- matrix(nrow = participant_count*2, ncol = 7, byrow = FALSE)
colnames(users_data.prep) <- c("ID","IPT","DrawingType","order","visibility.score","correctness.score","f.score")
users_data_frame <- as.data.frame(users_data.prep)
#set up a data frame with the visibility, correctness and f-score average for every participant
users_data.prepAverage <- matrix(nrow = participant_count, ncol = 6, byrow = FALSE)
colnames(users_data.prepAverage) <- c("ID","IPT","order","visibility.score","correctness.score","f.score")
users_data_frame.averages <- as.data.frame(users_data.prepAverage)
users_data.prepFscore <- matrix(nrow = participant_count*2, ncol = 5, byrow = FALSE)
colnames(users_data.prepFscore) <- c("ID","DrawingType","precision","recall","f.score")
users_data_frame.fscore <- as.data.frame(users_data.prepFscore)
#writing the values to the dataframe
for(id in 1:participant_count){
#f-score for 2D
precision.2D <- (all_Correct2D[id]/all_Visibility2D[id])
recall.2D <- all_Correct2D[id]/totalNumberOfRelations
fscore.2D <- 2*((precision.2D*recall.2D)/(precision.2D+recall.2D))
#f-score for 3D
precision.3D <- (all_Correct3D[id]/all_Visibility3D[id])
recall.3D <- all_Correct3D[id]/totalNumberOfRelations
fscore.3D <- 2*((precision.3D*recall.3D)/(precision.3D+recall.3D))
#write data to the dataframes
users_data_frame[id,1] <- as.character(id)
users_data_frame[id,2] <- ipt_results[id]
users_data_frame[id,3] <- "2D"
users_data_frame[id,4] <- if(id %% 2 == 0){"3Dfirst"}else{"2Dfirst"}
users_data_frame[id,5] <- all_Visibility2D[id]
users_data_frame[id,6] <- precision.2D*100
users_data_frame[id,7] <- fscore.2D
users_data_frame[(id+participant_count),1] <- as.character(id)
users_data_frame[(id+participant_count),2] <- ipt_results[id]
users_data_frame[(id+participant_count),3] <- "3D"
users_data_frame[(id+participant_count),4] <- if(id %% 2 == 0){"3Dfirst"}else{"2Dfirst"}
users_data_frame[(id+participant_count),5] <- all_Visibility3D[id]
users_data_frame[(id+participant_count),6] <- precision.3D*100
users_data_frame[(id+participant_count),7] <- fscore.3D
users_data_frame.averages[id,1] <- as.character(id)
users_data_frame.averages[id,2] <- ipt_results[id]
users_data_frame.averages[id,3] <- if(id %% 2 == 0){"3Dfirst"}else{"2Dfirst"}
users_data_frame.averages[id,4] <- mean(c(all_Visibility2D[id],all_Visibility3D[id]))
users_data_frame.averages[id,5] <- mean(c((precision.2D*100),(precision.3D*100)))
users_data_frame.averages[id,6] <- mean(c(fscore.2D,fscore.3D))
users_data_frame.fscore[id,1] <- as.character(id)
users_data_frame.fscore[id,2] <- "2D"
users_data_frame.fscore[id,3] <- precision.2D
users_data_frame.fscore[id,4] <- recall.2D
users_data_frame.fscore[id,5] <- fscore.2D
users_data_frame.fscore[(id+participant_count),1] <- as.character(id)
users_data_frame.fscore[(id+participant_count),2] <- "3D"
users_data_frame.fscore[(id+participant_count),3] <- precision.3D
users_data_frame.fscore[(id+participant_count),4] <- recall.3D
users_data_frame.fscore[(id+participant_count),5] <- fscore.3D
}
#mutate drawing type, Id and order into factors
users_data_frame <- users_data_frame %>%
mutate(DrawingType = as.factor(DrawingType),
ID = as.factor(ID),
order = as.factor(order))
users_data_frame.averages <- users_data_frame.averages %>%
mutate(ID = as.factor(ID),
order = as.factor(order))
users_data_frame.fscore <- users_data_frame.fscore %>%
mutate(ID = as.factor(ID))
ggplot(users_data_frame, aes(IPT, visibility.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Visibility-Score Depending on the IPT")
#setting up the model
IPTmodel.visibility <- lm(visibility.score ~ IPT, data=users_data_frame.averages)
summary(IPTmodel.visibility)
#residual plot
plot(fitted(IPTmodel.visibility),residuals(IPTmodel.visibility))
#checking the correctness-score dependency on the IPT
ggplot(users_data_frame, aes(IPT, correctness.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Correctness-Score Depending on the IPT")
IPTmodel.correctness <- lm(correctness.score ~ IPT, data=users_data_frame.averages)
summary(IPTmodel.correctness)
#residual plot
plot(fitted(IPTmodel.correctness),residuals(IPTmodel.correctness))
#checking the f-score dependency on the IPT
ggplot(users_data_frame, aes(IPT, f.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "F-Score depending on the IPT")
IPTmodel.fscore <- lm(f.score ~ IPT, data=users_data_frame.averages)
summary(IPTmodel.fscore)
#residual plot
plot(fitted(IPTmodel.fscore),residuals(IPTmodel.fscore))
ggplot(users_data_frame, aes(order, visibility.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Visibility-Score Depending on the order of drawings")
#setting up the model
ordermodel.visibility <- lm(visibility.score ~ order, data=users_data_frame.averages)
summary(ordermodel.visibility)
#residual plot
plot(fitted(ordermodel.visibility),residuals(ordermodel.visibility))
#checking the correctness-score dependency on the order of drawings
ggplot(users_data_frame, aes(order, correctness.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Correctness-Score Depending on the order of drawings")
ordermodel.correctness <- lm(correctness.score ~ order, data=users_data_frame.averages)
summary(ordermodel.correctness)
#residual plot
plot(fitted(ordermodel.correctness),residuals(ordermodel.correctness))
#checking the f-score dependency on the order of drawings
ggplot(users_data_frame, aes(order, f.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "F-Score depending on the order of drawings")
ordermodel.fscore <- lm(f.score ~ order, data=users_data_frame.averages)
summary(ordermodel.fscore)
#residual plot
plot(fitted(ordermodel.fscore),residuals(ordermodel.fscore))
ggplot(users_data_frame, aes(DrawingType, visibility.score)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Visibility-Score depending on the drawing type",
subtitle = "all 3D scores are layerd")
#set up data frames containing only 2D or 3D data
users_data_frame.2D <-users_data_frame[users_data_frame$DrawingType=="2D",]
users_data_frame.3D <-users_data_frame[users_data_frame$DrawingType=="3D",]
#summary of 2d sketch maps' visibility-score
summary(users_data_frame.2D$visibility.score)
boxplot(users_data_frame.2D$visibility.score)
#every drawing in 3D has the maximum of 63 points
summary(users_data_frame.3D$visibility.score)
#disable scientific notation to make very low numbers easier to compare
options(scipen = 999)
#setting up the linear mixed effects model for the visible results only
visibility.model = lmer(visibility.score ~ IPT + order + (1|ID) + DrawingType, data=users_data_frame, REML = FALSE)
summary(visibility.model)
#show that the model is singular
performance::icc(visibility.model)#true
lme4::isSingular(visibility.model)#true
performance::check_singularity(visibility.model)#true
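#an extra hedged check: for a singular fit the random-intercept variance is expected to be (near) zero
lme4::VarCorr(visibility.model)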
# base R function for t-test requires a different df format
users_data_wide <- users_data_frame %>%
select(ID, IPT, order, DrawingType, visibility.score) %>%
spread(DrawingType, visibility.score)
t.test(users_data_wide$`3D`, users_data_wide$`2D`, paired=TRUE)
#setting up the null model where the drawing type is omitted
visibility.null = lmer(visibility.score ~ IPT + order + (1|ID), data=users_data_frame, REML = FALSE)
#anova for significance
anova(visibility.null,visibility.model)
#residual plot
plot(fitted(visibility.model), resid(visibility.model, type = "pearson"))+
abline(0,0, col="red")
visibility.modelSimple = lmer(visibility.score ~ (1|ID) + DrawingType, data=users_data_frame, REML = FALSE)
#this is not singular but the null model is
visibility.nullSimple = lmer(visibility.score ~ (1|ID), data=users_data_frame, REML = FALSE)
anova(visibility.nullSimple,visibility.modelSimple)
#TODO: use a relative value or something else for the correctness
#have a look at the data with the following plots
ggplot(users_data_frame, aes(DrawingType, correctness.score, colour = order)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Correctness-Score depending on drawing type",
subtitle = "and in color the different order of drawings")
ggplot(data = users_data_frame,
aes(x = IPT,
y = correctness.score,
col = as.factor(DrawingType)))+
geom_point(size = 1.5,
alpha = .7,
position = "jitter")+
geom_smooth(method = lm,
se = T,
size = 1.5,
linetype = 1,
alpha = .3)+
theme_minimal()+
labs(title = "Correlation of Relative Correctness-Score and IPT-result")+
scale_color_manual(name = "Drawing Tool",
labels = c("Pen and Paper", "VR"),
values = c("darkcyan", "orange"))
#setting up the linear mixed effects model for the correctness score analysis
correctness.model = lmer(correctness.score ~ IPT + order + (1|ID) + DrawingType, data=users_data_frame, REML = FALSE)
#setting up the null models and checking the significance of the model with an ANOVA
correctness.null = lmer(correctness.score ~ IPT + order + (1|ID) , data=users_data_frame, REML = FALSE)
correctness.nullWithoutIPT = lmer(correctness.score ~ order + (1|ID) + DrawingType , data=users_data_frame, REML = FALSE)
correctness.nullWithoutOrder = lmer(correctness.score ~ IPT + (1|ID) + DrawingType , data=users_data_frame, REML = FALSE)
correctness.modelOrderOnly = lmer(correctness.score ~ order + (1|ID), data=users_data_frame, REML = FALSE)
correctness.nullOrderOnly = lmer(correctness.score ~ (1|ID), data=users_data_frame, REML = FALSE)
anova(correctness.nullOrderOnly, correctness.modelOrderOnly)
anova(correctness.null,correctness.model) #checking the significance of the drawing type
anova(correctness.nullWithoutIPT,correctness.model) #checking the significance of the IPT
anova(correctness.nullWithoutOrder,correctness.model) #checking the significance of the drawing order
#residual plot
plot(fitted(correctness.model), resid(correctness.model, type = "pearson"))+
abline(0,0, col="red")
#get an overview
summary(users_data_frame$f.score)
summary(users_data_frame.2D$f.score)
summary(users_data_frame.3D$f.score)
#variance of the f-scores of the 2D and 3D drawings, separated
var(users_data_frame.2D$f.score)
var(users_data_frame.3D$f.score)
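#a hedged sketch comparing the two variances directly with an F test (assumes roughly normal f-scores)
var.test(users_data_frame.2D$f.score, users_data_frame.3D$f.score)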
ggplot(users_data_frame, aes(x = f.score, y=DrawingType, colour = DrawingType)) + # Change filling color
geom_boxplot()+ theme_classic()+theme( axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.line.y = element_blank(), axis.text.x = element_text(size = 12), axis.title.y =element_blank()
) +labs(x = "F-Score")+
scale_color_manual(name = "Drawing Tool",
labels = c("Pen and Paper", "VR"),
values = c("darkcyan", "orange"))
#have a look at precision and recall
ggplot(users_data_frame.fscore, aes(precision, recall, colour = DrawingType)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Precision and Recall",
subtitle = "colored are the different drawing tools")
ggplot(users_data_frame, aes(IPT, f.score, colour = DrawingType)) +
geom_point()+
geom_smooth(method = lm,
se = FALSE,
size = .5,
alpha = .8)+ # to add regression line
labs(title = "Precision and Recall",
subtitle = "colored are the different drawing tools")
#setting up the linear mixed effects model
fscore.model = lmer(f.score ~ IPT + order + DrawingType + (1|ID), data=users_data_frame, REML = FALSE)
summary(fscore.model)
#setting up the null model that does not contain the Drawing Type
fscore.null = lmer(f.score ~ IPT + order + (1|ID), data=users_data_frame, REML = FALSE)
#setting up the null model where the ipt is omitted
fscore.nullIPT = lmer(f.score ~ order + DrawingType + (1|ID), data=users_data_frame, REML = FALSE)
#setting up the null model where the order of drawing is omitted
fscore.nullOrder = lmer(f.score ~ IPT + DrawingType + (1|ID), data=users_data_frame, REML = FALSE)
#anova for checking the relevance of the different values
anova(fscore.null,fscore.model) # checking the relevance of the drawing type
anova(fscore.nullIPT,fscore.model) # checking the relevance of the IPT
anova(fscore.nullOrder,fscore.model) # checking the relevance of the order of the drawing
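#a hedged follow-up: Wald confidence intervals for the fixed effects of the f-score model
confint(fscore.model, method = "Wald")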
ggplot(data = users_data_frame,
aes(x = IPT,
y = f.score,
col = as.factor(DrawingType)))+
geom_point(size = 1.5,
alpha = .7,
position = "jitter")+
geom_smooth(method = lm,
se = T,
size = 1.5,
linetype = 1,
alpha = .3)+
theme_minimal()+
labs(title = "Correlation of F-Score and IPT-result for the 2 Drawingtools")+
scale_color_manual(name = "Drawing Tool",
labels = c("Pen and Paper", "VR"),
values = c("darkcyan", "orange"))
#residual plot
plot(fitted(fscore.model), resid(fscore.model, type = "pearson"))+
abline(0,0, col="red")
#visible.plottest = lmer(score ~ IPT + (1|ID) + (1|DrawingType), data=users_data_frame.visible)
#estimates(visible.plottest)
#visualize(visible.plottest, plot="all",formula = NULL)
#TODO: is there a way to compare these two?
#TODO: create graphics
users_data_frame.2D
#how many 2D drawings reached the maximum visibility score of 63
sum(users_data_frame.2D$visibility.score == 63)
users_data_frame.2D$visibility.score == 63