The learning phase consisted of 15 blocks of 15 trials each (225 trials total).
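The per-block accuracy table used throughout the plots below (dCatTrainAvg) could plausibly be built as in this minimal sketch. The raw-data object dTrain and the column names trial and correct are assumptions for illustration, not the actual preprocessing pipeline.

library(dplyr)

# Hypothetical sketch: derive a block index from the trial counter
# (15 trials per block) and average accuracy within subject x block.
# `dTrain`, `trial`, and `correct` are assumed names, not the originals.
dCatTrainAvg <- dTrain %>%
  mutate(Block = ceiling(trial / 15)) %>%   # trials 1-15 -> block 1, etc.
  group_by(id, condit, Block) %>%
  summarise(propCor = mean(correct), .groups = "drop")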
Experiment 2
Testing - Splitting Performance by End of Training
#| column: page-inset-right
library(gghalves)

ps <- dcp %>%
  ggplot(aes(x = Pattern.Type2, y = propCor, fill = Condition)) +
  stat_summary(geom = "bar", fun = mean, position = position_dodge()) +
  stat_summary(geom = "errorbar", fun.data = mean_se, position = position_dodge()) +
  facet_wrap(~cq) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  xlab("Pattern-Type") + ylab("Proportion Correct") +
  ggtitle("Low vs High Performers (median split within condition - final training block) - Performance x Pattern Type")

hd <- dcp %>%
  filter(Pattern.Type2 == "New-High") %>%
  ggplot(aes(x = Pattern.Type2, y = propCor, fill = Condition)) +
  geom_boxplot(outlier.shape = NA) +
  geom_jitter(alpha = .5) +
  facet_wrap(~cq) +
  xlab("Pattern-Type") + ylab("Proportion Correct") +
  ggtitle("Low vs High Performers (median split within condition) - High Distortion Performance")

# dcp %>%
#   filter(Pattern.Type2 == "New-High") %>%
#   ggplot(aes(x = Pattern.Type2, y = propCor, fill = Condition)) +
#   geom_half_violin() +
#   geom_jitter(alpha = .5) +
#   facet_wrap(~cq) +
#   ggtitle("Low vs High Performers (median split within condition) - High Distortion Performance")
# ps
# gridExtra::grid.arrange(ps, hd)

p7 <- dcp %>%
  filter(endTrain > .75) %>%
  ggplot(aes(x = Pattern.Type2, y = propCor, fill = Condition)) +
  stat_summary(geom = "bar", fun = mean, position = position_dodge()) +
  stat_summary(geom = "errorbar", fun.data = mean_se, position = position_dodge()) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  xlab("Pattern-Type") + ylab("Proportion Correct") +
  ggtitle("Performance x Pattern Type - Only retaining sbjs with >75% accuracy in final training block")

p5 <- dcp %>%
  filter(endTrain > .50) %>%
  ggplot(aes(x = Pattern.Type2, y = propCor, fill = Condition)) +
  stat_summary(geom = "bar", fun = mean, position = position_dodge()) +
  stat_summary(geom = "errorbar", fun.data = mean_se, position = position_dodge()) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  xlab("Pattern-Type") + ylab("Proportion Correct") +
  ggtitle("Performance x Pattern Type - Only retaining sbjs with >50% accuracy in final training block")

gridExtra::grid.arrange(ps, p7, p5)
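The grouping variables used above, endTrain (accuracy in the final training block) and cq (the within-condition median split on that accuracy), are not defined in this section. A minimal sketch of how they could be derived, assuming the dCatTrainAvg table sketched earlier; the exact computation in the original pipeline may differ:

# Hypothetical sketch: `endTrain` is each subject's accuracy in the final
# (15th) training block; `cq` labels subjects as low/high performers
# relative to the median of their own condition.
endPerf <- dCatTrainAvg %>%
  filter(Block == 15) %>%
  select(id, condit, endTrain = propCor)

dcp <- dcp %>%
  left_join(endPerf, by = c("id", "condit")) %>%
  group_by(condit) %>%
  mutate(cq = ifelse(endTrain > median(endTrain),
                     "High Performer", "Low Performer")) %>%
  ungroup()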
ANOVA Table (type III tests)
Effect DFn DFd F p p<.05 ges
1 End.Training 1 49 13.847 0.000511 * 0.220
2 Condition 1 49 13.131 0.000689 * 0.211
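The table above is a type III ANCOVA of high-distortion accuracy on Condition with End.Training as covariate. The objects at1, at2, and at3 referenced in the plotting code below are presumably this model refit at each retention threshold; a hedged sketch using rstatix (the original calls are not shown in this section):

library(rstatix)

# Hypothetical sketch: assumed definitions of `at1`/`at2`/`at3`,
# the ANCOVA tables used for the plot subtitles below.
at1 <- dc2 %>%
  anova_test(New.High ~ Condition + End.Training, type = 3)
at2 <- dc2 %>% filter(End.Training > .33) %>%
  anova_test(New.High ~ Condition + End.Training, type = 3)
at3 <- dc2 %>% filter(End.Training > .88) %>%
  anova_test(New.High ~ Condition + End.Training, type = 3)
get_anova_table(at1)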
# dc2 %>% anova_test(New.High ~ condit * End.Training)  # no sig. interaction
pwc1 <- dc2 %>%
  emmeans_test(New.High ~ Condition, covariate = End.Training,
               p.adjust.method = "bonferroni") %>%
  add_xy_position(x = "condit", fun = "mean_se")
get_emmeans(pwc1)
pwc2 <- dc2 %>%
  filter(End.Training > .33) %>%
  emmeans_test(New.High ~ Condition, covariate = End.Training,
               p.adjust.method = "bonferroni") %>%
  add_xy_position(x = "condit", fun = "mean_se")

pwc3 <- dc2 %>%
  filter(End.Training > .88) %>%
  emmeans_test(New.High ~ Condition, covariate = End.Training,
               p.adjust.method = "bonferroni") %>%
  add_xy_position(x = "condit", fun = "mean_se")

ep1 <- ggline(get_emmeans(pwc1), x = "Condition", y = "emmean") +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  stat_pvalue_manual(pwc1, hide.ns = TRUE, tip.length = FALSE) +
  labs(subtitle = get_test_label(at1, detailed = TRUE),
       caption = get_pwc_label(pwc1),
       title = "Estimated Marginal Means from ANCOVA - All Sbj. (n=89)")

ep2 <- ggline(get_emmeans(pwc2), x = "Condition", y = "emmean") +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  stat_pvalue_manual(pwc2, hide.ns = TRUE, tip.length = FALSE) +
  labs(subtitle = get_test_label(at2, detailed = TRUE),
       caption = get_pwc_label(pwc2),
       title = "Estimated Marginal Means from ANCOVA - Only above chance sbj (>.33, n=87)")

ep3 <- ggline(get_emmeans(pwc3), x = "Condition", y = "emmean") +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  stat_pvalue_manual(pwc3, hide.ns = TRUE, tip.length = FALSE) +
  labs(subtitle = get_test_label(at3, detailed = TRUE),
       caption = get_pwc_label(pwc3),
       title = "Estimated Marginal Means from ANCOVA - Only strong learners (>.88; n=52)")

gg.ac1 <- ggscatter(dc2, x = "End.Training", y = "New.High", color = "Condition",
                    add = "reg.line", add.params = list(size = .3)) +
  stat_regline_equation(aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~"),
                            color = Condition)) +
  ggtitle("Including All Subjects (n=89)") +
  ylab("High Distortions - Proportion Correct") +
  xlab("End of Training - Proportion Correct")

gg.ac2 <- dc2 %>%
  filter(End.Training > .33) %>%
  ggscatter(., x = "End.Training", y = "New.High", color = "Condition",
            add = "reg.line", add.params = list(size = .3)) +
  stat_regline_equation(aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~"),
                            color = Condition)) +
  ggtitle("Retain Sbj's above chance (>.33) at train end (n=87)") +
  ylab("High Distortions - Proportion Correct") +
  xlab("End of Training - Proportion Correct")

gg.ac3 <- dc2 %>%
  filter(End.Training > .88) %>%
  ggscatter(., x = "End.Training", y = "New.High", color = "Condition",
            add = "reg.line", add.params = list(size = .3)) +
  stat_regline_equation(aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~~"),
                            color = Condition)) +
  ggtitle("Retain only stronger learners (>.88) at train end (n=52)") +
  ylab("High Distortions - Proportion Correct") +
  xlab("End of Training - Proportion Correct")

gtitle <- "Hu & Nosofsky 2020 - Experiment 2. Effect of Condition on High Distortions - Controlling for End of Training Performance"
title <- ggdraw() +
  draw_label(gtitle, fontface = 'bold', x = 0, hjust = 0) +
  theme(plot.margin = margin(0, 0, 0, 7))

plot_grid(title, NULL, gg.ac1, ep1, gg.ac2, ep2, gg.ac3, ep3,
          ncol = 2, rel_heights = c(.1, 1, 1, 1))
Individual Learning Curves
#| column: screen-inset-right
dCatTrainAvg %>%
  filter(condit == "rep") %>%
  ggplot(aes(x = Block, y = propCor, col = condit)) +
  stat_summary(shape = 0, geom = "point", fun = "mean") +
  stat_summary(geom = "line", fun = "mean", col = "red") +
  facet_wrap(~id) +
  ylim(c(0, 1)) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  ggtitle("Hu & Nosofsky Experiment 2 - Learning. Rep Subjects - Average Accuracy Per Block.") +
  xlab("Training Block") + ylab("Proportion Correct") +
  scale_x_continuous(breaks = seq(1, 15))
dCatTrainAvg %>%
  filter(condit == "nrep") %>%
  ggplot(aes(x = Block, y = propCor, col = condit)) +
  stat_summary(shape = 2, geom = "point", fun = "mean", col = "lightblue") +
  stat_summary(geom = "line", fun = "mean", col = "lightblue") +
  facet_wrap(~id) +
  ylim(c(0, 1)) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  ggtitle("Hu & Nosofsky Experiment 2 - Learning. NRep Subjects - Average Accuracy Per Block.") +
  xlab("Training Block") + ylab("Proportion Correct") +
  scale_x_continuous(breaks = seq(1, 15))
Experiment 2 - Separate Category - Learning Curves
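No code survives under this heading; a plausible sketch of what it would contain, assuming the per-block data carries a category label (the column name Category is an assumption, not taken from the original pipeline):

# Hypothetical sketch: average accuracy per training block, plotted
# separately for each of the three training categories. `Category` is
# an assumed column name; the original data structure may differ.
dCatTrainAvg %>%
  ggplot(aes(x = Block, y = propCor, col = condit)) +
  stat_summary(geom = "point", fun = "mean") +
  stat_summary(geom = "line", fun = "mean") +
  facet_wrap(~Category) +
  ylim(c(0, 1)) +
  geom_hline(yintercept = .33, linetype = "dashed") +
  ggtitle("Hu & Nosofsky Experiment 2 - Learning Curves by Category") +
  xlab("Training Block") + ylab("Proportion Correct") +
  scale_x_continuous(breaks = seq(1, 15))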