Chapter 3 Statistical Analysis
3.1 Deviation coding and dummy coding
Deviation coding was applied to Language (independent variable) and valence category (independent variable). We also dummy-coded the valence categories in case the interactions turned out to be significant and needed to be decomposed into simple effects.
#Deviation coding (Dutch = .5, English = -.5; each valence predictor is 2/3 for its target category and -1/3 otherwise)
ReadingData_ENandNL <- ReadingData_ENandNL %>%
  mutate(Language_dev = if_else(Language == "Dutch", .5, -.5),
         V_Category_NeuPos_dev = if_else(V_Category == "Positive", 2/3, -1/3),
         V_Category_NeuNeg_dev = if_else(V_Category == "Negative", 2/3, -1/3))
#Dummy coding (Neutral as the reference level)
ReadingData_ENandNL <- ReadingData_ENandNL %>%
  mutate(V_Category_NeuPos_dum = if_else(V_Category == "Positive", 1, 0),
         V_Category_NeuNeg_dum = if_else(V_Category == "Negative", 1, 0))
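As a quick sanity check, the category-to-code mapping can be cross-tabulated (a minimal sketch, assuming the dplyr pipeline above has been run):
#Each valence category should map onto exactly one combination of codes
ReadingData_ENandNL %>%
  distinct(V_Category, V_Category_NeuPos_dev, V_Category_NeuNeg_dev,
           V_Category_NeuPos_dum, V_Category_NeuNeg_dum)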
3.2 Find the best-fitting model
SFD (the dependent variable) is skewed, so we used glmer() to fit a generalised linear mixed-effects model (GLMM).
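The skew can be checked with a quick histogram; a minimal sketch in base R (illustrative, not part of the original pipeline; durations are assumed to be in ms):
#First fixation durations typically show a long right tail, which motivates
#a Gamma GLMM rather than a Gaussian linear mixed model
hist(ReadingData_ENandNL$WORD_FIRST_FIXATION_DURATION,
     breaks = 50, main = "First fixation duration", xlab = "Duration (ms)")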
Here I have kept the code used to build the models up from simple to complex (left commented out).
#Random effects (intercepts only)
#Step A: a simple model with a Gamma family.
#I did not use the mean-centered variables as they gave me an error message.
#mod <- glmer(WORD_FIRST_FIXATION_DURATION ~
# #Main effects
# V_Category_NeuPos_dev +
# V_Category_NeuNeg_dev +
# Language_dev +
# #Control variables
# WORD_LENGTH +
# Conc_Mean +
# #Interactions
# V_Category_NeuPos_dev:Language_dev +
# V_Category_NeuNeg_dev:Language_dev +
# #Random effects
# (1|WORD)+ (1|PP_NR), data = ReadingData_ENandNL, family = Gamma(link="identity"))
#Step B: start adding slopes
#mod3 <- glmer(WORD_FIRST_FIXATION_DURATION~
# #Main effects
# V_Category_NeuPos_dev +
# V_Category_NeuNeg_dev +
# Language_dev +
# #Control variables
# WORD_LENGTH +
# Conc_Mean +
# #Interactions
# V_Category_NeuPos_dev:Language_dev +
# V_Category_NeuNeg_dev:Language_dev +
# #Random effects
# (1|WORD)+ (1 + Language_dev + V_Category_NeuPos_dev + V_Category_NeuNeg_dev | PP_NR), data = ReadingData_ENandNL, family=Gamma(link="identity"))
#Added random slopes for WORD as well as PP_NR
#mod4 <- glmer(WORD_FIRST_FIXATION_DURATION~
# #Main effects
# V_Category_NeuPos_dev +
# V_Category_NeuNeg_dev +
# Language_dev +
# #Control variables
# WORD_LENGTH +
# Conc_Mean +
# #Interactions
# V_Category_NeuPos_dev:Language_dev +
# V_Category_NeuNeg_dev:Language_dev +
# #Random effects
# (1 + Language_dev + V_Category_NeuPos_dev + V_Category_NeuNeg_dev |WORD)+ (1 + Language_dev + V_Category_NeuPos_dev + V_Category_NeuNeg_dev | PP_NR),
# data = ReadingData_ENandNL, family=Gamma(link="identity"))
#Step C: Maximal model
#mod5 <- glmer(WORD_FIRST_FIXATION_DURATION ~
# #Main effects
# V_Category_NeuPos_dev +
# V_Category_NeuNeg_dev +
# Language_dev +
# #Control variables
# WORD_LENGTH +
# Conc_Mean +
# #Interactions
# V_Category_NeuPos_dev:Language_dev +
# V_Category_NeuNeg_dev:Language_dev +
# #Random effects
# (1|WORD)+ (1+Language_dev + V_Category_NeuPos_dev +V_Category_NeuNeg_dev + Conc_Mean + WORD_LENGTH | PP_NR),
# data = ReadingData_ENandNL, family=Gamma(link="identity"))
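If the candidate models above are uncommented and fitted, their random-effects structures can be compared with information criteria and likelihood ratio tests; a minimal sketch (shown commented out, like the candidates themselves):
#Compare candidate random-effects structures once the models are fitted
#AIC(mod, mod3, mod4, mod5)  #information criteria; lower is better
#anova(mod3, mod4)           #likelihood ratio test for the nested pair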
After running multiple models, I decided on a maximal model, which includes random intercepts and slopes for both participants and words (subjects and items). This serves as our baseline model, mod.
mod <- glmer(WORD_FIRST_FIXATION_DURATION ~
#Main effects
V_Category_NeuPos_dev +
V_Category_NeuNeg_dev +
Language_dev +
#Control variables
Conc_Mean +
WORD_LENGTH +
#Interactions
V_Category_NeuPos_dev:Language_dev +
V_Category_NeuNeg_dev:Language_dev +
#Random effects
(1 + Language_dev + V_Category_NeuPos_dev + V_Category_NeuNeg_dev + Conc_Mean + WORD_LENGTH | WORD)+ (1 + Language_dev + V_Category_NeuPos_dev + V_Category_NeuNeg_dev + Conc_Mean + WORD_LENGTH | PP_NR),
data = ReadingData_ENandNL, family=Gamma(link="identity"), control = glmerControl(optimizer = "optimx", calc.derivs = FALSE, optCtrl = list(method = "L-BFGS-B", maxit = 10000, starttests = FALSE, kkt = FALSE)))
save(mod,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod_results.RData')
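Before interpreting mod, it is worth confirming that the fit converged without a singular random-effects structure; a minimal sketch using lme4's built-in checks:
#Check the baseline model for boundary (singular) fits and inspect the
#estimated random-effect variances and correlations
isSingular(mod)  #TRUE would indicate an overparameterised random-effects structure
VarCorr(mod)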
Here I also created a baseline model with the original categorical variables; that is, mod2 uses the non-deviation-coded variables.
mod2 <- glmer(WORD_FIRST_FIXATION_DURATION ~
#Main effects
V_Category +
Language +
#Control variables
Conc_Mean +
WORD_LENGTH +
#Interactions
V_Category:Language +
#Random effects
(1 + Language + V_Category + Conc_Mean + WORD_LENGTH | WORD)+ (1 + Language + V_Category + Conc_Mean + WORD_LENGTH | PP_NR),
data = ReadingData_ENandNL, family=Gamma(link="identity"), control = glmerControl(optimizer = "optimx", calc.derivs = FALSE, optCtrl = list(method = "L-BFGS-B", maxit = 10000, starttests = FALSE, kkt = FALSE)))
save(mod2,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod2_results.RData')
I created a summary of the model for reporting.
modelsummary <- broom.mixed::tidy(mod, effects = c("ran_pars", "fixed"), scales = NULL, ran_prefix = NULL, conf.int = TRUE, conf.level = 0.95, conf.method = "Wald") %>%
filter(effect =="fixed")
df_modelsummary <- as.data.frame(modelsummary)
modelsummary <- data.frame(df_modelsummary$term,
round(df_modelsummary$estimate,2),
round(df_modelsummary$std.error,2),
round(df_modelsummary$statistic,2),
round(df_modelsummary$p.value,2),
round(df_modelsummary$conf.low,2),
round(df_modelsummary$conf.high,2))
names(modelsummary) <- c("Term", "Estimate", "Standard Error", "t-value", "p-value", "95% CI (Lower)", "95% CI (Upper)")
modelsummary$`p-value` <- ifelse(modelsummary$`p-value` <0.001, "<0.001", round(modelsummary$`p-value`, 3))
kable(modelsummary) %>% kable_styling() %>% scroll_box(width = "100%", height = "100%")
Term | Estimate | Standard Error | t-value | p-value | 95% CI (Lower) | 95% CI (Upper) |
---|---|---|---|---|---|---|
(Intercept) | 198.35 | 3.76 | 52.76 | <0.001 | 190.98 | 205.72 |
V_Category_NeuPos_dev | -5.44 | 1.41 | -3.85 | <0.001 | -8.21 | -2.67 |
V_Category_NeuNeg_dev | -0.26 | 1.71 | -0.15 | 0.88 | -3.62 | 3.10 |
Language_dev | -30.62 | 2.19 | -13.96 | <0.001 | -34.92 | -26.32 |
Conc_Mean | 0.32 | 0.63 | 0.51 | 0.61 | -0.92 | 1.56 |
WORD_LENGTH | 4.75 | 0.42 | 11.27 | <0.001 | 3.92 | 5.57 |
V_Category_NeuPos_dev:Language_dev | 6.17 | 2.58 | 2.39 | 0.02 | 1.11 | 11.22 |
V_Category_NeuNeg_dev:Language_dev | 0.26 | 3.18 | 0.08 | 0.93 | -5.97 | 6.49 |
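As an aside, the column-by-column rounding above could be written more compactly with across(); a sketch assuming dplyr >= 1.0, before the columns are renamed and the p-values formatted as above:
#Round all numeric columns of the tidy output in one step
modelsummary <- df_modelsummary %>%
  mutate(across(where(is.numeric), ~ round(.x, 2))) %>%
  select(term, estimate, std.error, statistic, p.value, conf.low, conf.high)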
3.3 Run model comparisons for the main effects and the interaction
First, I created reduced models (mod3, mod4, mod5) by removing from the baseline model, respectively, the main effect of valence, the main effect of language, and the interaction. (Note that these names reuse the mod3-mod5 labels of the commented-out exploratory fits above.)
#Removing main effect of valence from mod
mod3 <- update(mod, . ~ . - V_Category_NeuPos_dev - V_Category_NeuNeg_dev)
#Removing main effect of language from mod
mod4 <- update(mod, . ~ . - Language_dev)
#Removing interaction from mod
mod5 <- update(mod, . ~ . - V_Category_NeuPos_dev:Language_dev - V_Category_NeuNeg_dev:Language_dev)
save(mod3,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod3_results.RData')
save(mod4,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod4_results.RData')
save(mod5,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod5_results.RData')
Then I ran the model comparisons. Each comparison is a likelihood ratio test, carried out with anova().
mod_mod3 <- anova(mod, mod3) #baseline vs reduced model without the valence effect
mod_mod4 <- anova(mod, mod4) #baseline vs reduced model without the language effect
mod_mod5 <- anova(mod, mod5) #baseline vs reduced model without the valence x language interaction
The likelihood ratio test showed that the main effect of valence was significant (\(\chi^{2}\)(2) = 6.81, p = .03).
Model | npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) |
---|---|---|---|---|---|---|---|---|
mod3 | 49 | 335673.2 | 336079.5 | -167787.6 | 335575.2 | NA | NA | NA |
mod | 51 | 335670.4 | 336093.3 | -167784.2 | 335568.4 | 6.810234 | 2 | 0.0332029 |
The main effect of language was also significant (\(\chi^{2}\)(1) = 25.44, p < .001).
Model | npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) |
---|---|---|---|---|---|---|---|---|
mod4 | 50 | 335693.8 | 336108.4 | -167796.9 | 335593.8 | NA | NA | NA |
mod | 51 | 335670.4 | 336093.3 | -167784.2 | 335568.4 | 25.44375 | 1 | 5e-07 |
The valence x language interaction was not statistically significant (\(\chi^{2}\)(2) = 3.40, p = .18).
Model | npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) |
---|---|---|---|---|---|---|---|---|
mod5 | 49 | 335669.8 | 336076.1 | -167785.9 | 335571.8 | NA | NA | NA |
mod | 51 | 335670.4 | 336093.3 | -167784.2 | 335568.4 | 3.404829 | 2 | 0.182243 |
3.4 Decompose the significant effect of valence
The significant main effect of valence was decomposed with the emmeans() function. I used revpairwise to reverse the direction of the comparisons for easier reporting.
Posthoc_V_NeuPos <- emmeans(mod, list(revpairwise ~ V_Category_NeuPos_dev), adjust = "tukey")
Posthoc_V_NeuNeg <- emmeans(mod, list(revpairwise ~ V_Category_NeuNeg_dev), adjust = "tukey")
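The output below lists each comparison twice: once with z-ratios and p-values and once with asymptotic confidence limits. The confidence-limit version can be produced with confint(), which works the same way for the later emmeans outputs; a minimal sketch:
#Show the post-hoc comparisons, then their 95% confidence intervals
Posthoc_V_NeuPos
confint(Posthoc_V_NeuPos)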
Here, 0.667 refers to Positive and -0.333 to Neutral.
## $`emmeans of V_Category_NeuPos_dev`
## V_Category_NeuPos_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 230 2.58 Inf 225 235
## 0.667 224 2.69 Inf 219 229
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev, Language_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_NeuPos_dev`
## 1 estimate SE df z.ratio p.value
## 0.666666666666667 - (-0.333333333333333) -5.44 1.41 Inf -3.852 0.0001
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev, Language_dev
##
## $`pairwise differences of V_Category_NeuPos_dev`
## 1 estimate SE df asymp.LCL asymp.UCL
## 0.666666666666667 - (-0.333333333333333) -5.44 1.41 Inf -8.21 -2.67
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev, Language_dev
## Confidence level used: 0.95
The table can thus be read as:
Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|
Positive-Neutral | -5.44 | 1.41 | Inf | -3.852 | 0.0001 | -8.21 | -2.67 |
Here, 0.667 refers to Negative and -0.333 to Neutral.
## $`emmeans of V_Category_NeuNeg_dev`
## V_Category_NeuNeg_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 227 2.47 Inf 222 232
## 0.667 227 2.87 Inf 221 232
##
## Results are averaged over the levels of: V_Category_NeuPos_dev, Language_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_NeuNeg_dev`
## 1 estimate SE df z.ratio p.value
## 0.666666666666667 - (-0.333333333333333) -0.263 1.71 Inf -0.154 0.8780
##
## Results are averaged over the levels of: V_Category_NeuPos_dev, Language_dev
##
## $`pairwise differences of V_Category_NeuNeg_dev`
## 1 estimate SE df asymp.LCL asymp.UCL
## 0.666666666666667 - (-0.333333333333333) -0.263 1.71 Inf -3.62 3.1
##
## Results are averaged over the levels of: V_Category_NeuPos_dev, Language_dev
## Confidence level used: 0.95
The table can thus be read as:
Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|
Negative-Neutral | -0.263 | 1.71 | Inf | -0.154 | 0.8780 | -3.62 | 3.1 |
3.5 Exploratory analysis: compare positive vs negative
To compare positive and negative words directly, the valence categories were re-coded with Positive as the reference level (both new predictors are -1/3 for Positive):
ReadingData_ENandNL_PosNeg <- ReadingData_ENandNL %>%
  select(-V_Category_NeuPos_dev, -V_Category_NeuNeg_dev) %>%
  mutate(V_Category_PosNeu_dev = if_else(V_Category == "Neutral", 2/3, -1/3),
         V_Category_PosNeg_dev = if_else(V_Category == "Negative", 2/3, -1/3))
mod6 <- glmer(WORD_FIRST_FIXATION_DURATION ~
#Main effects
V_Category_PosNeu_dev +
V_Category_PosNeg_dev +
Language_dev +
#Control variables
Conc_Mean +
WORD_LENGTH +
#Interactions
V_Category_PosNeu_dev:Language_dev +
V_Category_PosNeg_dev:Language_dev +
#Random effects
(1 + Language_dev + V_Category_PosNeu_dev + V_Category_PosNeg_dev + Conc_Mean + WORD_LENGTH | WORD)+ (1 + Language_dev + V_Category_PosNeu_dev + V_Category_PosNeg_dev + Conc_Mean + WORD_LENGTH | PP_NR),
data = ReadingData_ENandNL_PosNeg, family=Gamma(link="identity"), control = glmerControl(optimizer = "optimx", calc.derivs = FALSE, optCtrl = list(method = "L-BFGS-B", maxit = 10000, starttests = FALSE, kkt = FALSE)))
save(mod6,file='C:/Users/ibana/OneDrive - University of Glasgow/@Year 3/For Publication/Data and Code/mod6_results.RData')
The effect of valence was decomposed with emmeans().
Posthoc_V_PosNeg <- emmeans(mod6, list(pairwise ~ V_Category_PosNeg_dev), adjust = "tukey")
Posthoc_V_PosNeg
## $`emmeans of V_Category_PosNeg_dev`
## V_Category_PosNeg_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 227 2.47 Inf 222 232
## 0.667 232 3.04 Inf 226 238
##
## Results are averaged over the levels of: V_Category_PosNeu_dev, Language_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_PosNeg_dev`
## 1 estimate SE df z.ratio p.value
## (-0.333333333333333) - 0.666666666666667 -5.29 2.01 Inf -2.631 0.0085
##
## Results are averaged over the levels of: V_Category_PosNeu_dev, Language_dev
##
## $`pairwise differences of V_Category_PosNeg_dev`
## 1 estimate SE df asymp.LCL asymp.UCL
## (-0.333333333333333) - 0.666666666666667 -5.29 2.01 Inf -9.23 -1.35
##
## Results are averaged over the levels of: V_Category_PosNeu_dev, Language_dev
## Confidence level used: 0.95
Here, -0.333 refers to Positive and 0.667 to Negative. The table can thus be read as:
Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|
Positive-Negative | -5.29 | 2.01 | Inf | -2.631 | 0.0085 | -9.23 | -1.35 |
3.6 Decompose the non-significant interaction
The model comparison did not show a significant Valence x Language interaction, but in mod the V_Category_NeuPos_dev:Language_dev coefficient was significant. I therefore decompose the interaction here to look into it more closely.
Posthoc_Int_NeuPosLang <- emmeans(mod, list(revpairwise ~ V_Category_NeuPos_dev:Language_dev), adjust = "tukey" )
Posthoc_Int_NeuNegLang <- emmeans(mod, list(revpairwise ~ V_Category_NeuNeg_dev:Language_dev), adjust = "tukey" )
Posthoc_Int_PosNegLang <- emmeans(mod6, list(pairwise ~ V_Category_PosNeg_dev:Language_dev), adjust = "tukey" )
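Since only the within-language comparisons are of interest, an alternative is to condition on language with the | operator in emmeans, which returns just those simple effects; a minimal sketch (the object name is illustrative):
#Simple effects of valence within each language, instead of extracting
#rows from the full four-cell pairwise table by hand
Posthoc_NeuPos_byLang <- emmeans(mod, revpairwise ~ V_Category_NeuPos_dev | Language_dev)
Posthoc_NeuPos_byLang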
For Language_dev, -0.5 refers to English and 0.5 to Dutch. To decompose the non-significant valence x language interaction, we only look at the pairwise differences within the same language, which appear in the top and bottom rows of each comparison table below.
## $`emmeans of V_Category_NeuPos_dev, Language_dev`
## V_Category_NeuPos_dev Language_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 -0.5 246 3.04 Inf 240 252
## 0.667 -0.5 237 3.17 Inf 231 244
## -0.333 0.5 213 2.62 Inf 208 218
## 0.667 0.5 211 3.10 Inf 205 217
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_NeuPos_dev, Language_dev`
## 1 estimate SE df z.ratio
## (0.666666666666667 -0.5) - (-0.333333333333333 -0.5) -8.52 1.78 Inf -4.778
## (-0.333333333333333 0.5) - (-0.333333333333333 -0.5) -32.63 2.39 Inf -13.679
## (-0.333333333333333 0.5) - (0.666666666666667 -0.5) -24.11 2.64 Inf -9.120
## 0.666666666666667 0.5 - (-0.333333333333333 -0.5) -34.99 3.12 Inf -11.225
## 0.666666666666667 0.5 - (0.666666666666667 -0.5) -26.47 3.22 Inf -8.226
## 0.666666666666667 0.5 - (-0.333333333333333 0.5) -2.36 2.03 Inf -1.159
## p.value
## <.0001
## <.0001
## <.0001
## <.0001
## <.0001
## 0.6526
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev
## P value adjustment: tukey method for comparing a family of 4 estimates
##
## $`pairwise differences of V_Category_NeuPos_dev, Language_dev`
## 1 estimate SE df
## (0.666666666666667 -0.5) - (-0.333333333333333 -0.5) -8.52 1.78 Inf
## (-0.333333333333333 0.5) - (-0.333333333333333 -0.5) -32.63 2.39 Inf
## (-0.333333333333333 0.5) - (0.666666666666667 -0.5) -24.11 2.64 Inf
## 0.666666666666667 0.5 - (-0.333333333333333 -0.5) -34.99 3.12 Inf
## 0.666666666666667 0.5 - (0.666666666666667 -0.5) -26.47 3.22 Inf
## 0.666666666666667 0.5 - (-0.333333333333333 0.5) -2.36 2.03 Inf
## asymp.LCL asymp.UCL
## -13.11 -3.94
## -38.76 -26.50
## -30.90 -17.32
## -43.00 -26.98
## -34.73 -18.20
## -7.58 2.87
##
## Results are averaged over the levels of: V_Category_NeuNeg_dev
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 4 estimates
In the table above, 0.667 refers to Positive and -0.333 to Neutral. As we only focus on the within-language comparisons (top and bottom rows), we extract these results:
Language | Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|---|
English | Positive-Neutral | -8.52 | 1.78 | Inf | -4.778 | <.0001 | -13.11 | -3.94 |
Dutch | Positive-Neutral | -2.36 | 2.03 | Inf | -1.159 | 0.6526 | -7.58 | 2.87 |
We will do the same for the other two tables.
## $`emmeans of V_Category_NeuNeg_dev, Language_dev`
## V_Category_NeuNeg_dev Language_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 -0.5 242 2.86 Inf 236 247
## 0.667 -0.5 241 3.50 Inf 235 248
## -0.333 0.5 212 2.54 Inf 207 217
## 0.667 0.5 212 3.27 Inf 206 218
##
## Results are averaged over the levels of: V_Category_NeuPos_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_NeuNeg_dev, Language_dev`
## 1 estimate SE df z.ratio
## (0.666666666666667 -0.5) - (-0.333333333333333 -0.5) -0.393 2.34 Inf -0.168
## (-0.333333333333333 0.5) - (-0.333333333333333 -0.5) -29.679 2.20 Inf -13.489
## (-0.333333333333333 0.5) - (0.666666666666667 -0.5) -29.287 3.02 Inf -9.694
## 0.666666666666667 0.5 - (-0.333333333333333 -0.5) -29.813 3.08 Inf -9.691
## 0.666666666666667 0.5 - (0.666666666666667 -0.5) -29.420 3.59 Inf -8.184
## 0.666666666666667 0.5 - (-0.333333333333333 0.5) -0.133 2.33 Inf -0.057
## p.value
## 0.9983
## <.0001
## <.0001
## <.0001
## <.0001
## 0.9999
##
## Results are averaged over the levels of: V_Category_NeuPos_dev
## P value adjustment: tukey method for comparing a family of 4 estimates
##
## $`pairwise differences of V_Category_NeuNeg_dev, Language_dev`
## 1 estimate SE df
## (0.666666666666667 -0.5) - (-0.333333333333333 -0.5) -0.393 2.34 Inf
## (-0.333333333333333 0.5) - (-0.333333333333333 -0.5) -29.679 2.20 Inf
## (-0.333333333333333 0.5) - (0.666666666666667 -0.5) -29.287 3.02 Inf
## 0.666666666666667 0.5 - (-0.333333333333333 -0.5) -29.813 3.08 Inf
## 0.666666666666667 0.5 - (0.666666666666667 -0.5) -29.420 3.59 Inf
## 0.666666666666667 0.5 - (-0.333333333333333 0.5) -0.133 2.33 Inf
## asymp.LCL asymp.UCL
## -6.40 5.62
## -35.33 -24.03
## -37.05 -21.53
## -37.72 -21.91
## -38.65 -20.19
## -6.13 5.86
##
## Results are averaged over the levels of: V_Category_NeuPos_dev
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 4 estimates
Here, 0.667 refers to Negative and -0.333 to Neutral. The within-language comparisons are:
Language | Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|---|
English | Negative-Neutral | -0.393 | 2.34 | Inf | -0.168 | 0.9983 | -6.40 | 5.62 |
Dutch | Negative-Neutral | -0.133 | 2.33 | Inf | -0.057 | 0.9999 | -6.13 | 5.86 |
## $`emmeans of V_Category_PosNeg_dev, Language_dev`
## V_Category_PosNeg_dev Language_dev emmean SE df asymp.LCL asymp.UCL
## -0.333 -0.5 242 2.86 Inf 236 247
## 0.667 -0.5 250 3.72 Inf 243 257
## -0.333 0.5 212 2.54 Inf 207 217
## 0.667 0.5 214 3.33 Inf 208 221
##
## Results are averaged over the levels of: V_Category_PosNeu_dev
## Confidence level used: 0.95
##
## $`pairwise differences of V_Category_PosNeg_dev, Language_dev`
## 1 estimate SE df z.ratio
## (-0.333333333333333 -0.5) - (0.666666666666667 -0.5) -8.31 2.63 Inf -3.156
## (-0.333333333333333 -0.5) - (-0.333333333333333 0.5) 29.66 2.20 Inf 13.489
## (-0.333333333333333 -0.5) - 0.666666666666667 0.5 27.39 2.98 Inf 9.195
## (0.666666666666667 -0.5) - (-0.333333333333333 0.5) 37.97 3.26 Inf 11.665
## (0.666666666666667 -0.5) - 0.666666666666667 0.5 35.70 3.59 Inf 9.950
## (-0.333333333333333 0.5) - 0.666666666666667 0.5 -2.27 2.74 Inf -0.830
## p.value
## 0.0087
## <.0001
## <.0001
## <.0001
## <.0001
## 0.8403
##
## Results are averaged over the levels of: V_Category_PosNeu_dev
## P value adjustment: tukey method for comparing a family of 4 estimates
##
## $`pairwise differences of V_Category_PosNeg_dev, Language_dev`
## 1 estimate SE df
## (-0.333333333333333 -0.5) - (0.666666666666667 -0.5) -8.31 2.63 Inf
## (-0.333333333333333 -0.5) - (-0.333333333333333 0.5) 29.66 2.20 Inf
## (-0.333333333333333 -0.5) - 0.666666666666667 0.5 27.39 2.98 Inf
## (0.666666666666667 -0.5) - (-0.333333333333333 0.5) 37.97 3.26 Inf
## (0.666666666666667 -0.5) - 0.666666666666667 0.5 35.70 3.59 Inf
## (-0.333333333333333 0.5) - 0.666666666666667 0.5 -2.27 2.74 Inf
## asymp.LCL asymp.UCL
## -15.1 -1.55
## 24.0 35.31
## 19.7 35.04
## 29.6 46.34
## 26.5 44.92
## -9.3 4.76
##
## Results are averaged over the levels of: V_Category_PosNeu_dev
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 4 estimates
Here, -0.333 refers to Positive and 0.667 to Negative. The within-language comparisons are:
Language | Comparison | estimate | SE | df | z.ratio | p.value | asymp.LCL | asymp.UCL |
---|---|---|---|---|---|---|---|---|
English | Positive-Negative | -8.31 | 2.63 | Inf | -3.156 | 0.0087 | -15.1 | -1.55 |
Dutch | Positive-Negative | -2.27 | 2.74 | Inf | -0.830 | 0.8403 | -9.3 | 4.76 |