###################################################
### PBI-dt - Parental Bonding Inventory ###########
### Preprocessing #################################
###################################################
# This script provides the preprocessing procedure for the German version of
# the Parental Bonding Instrument (PBI-dt), based on the translation of
# Benz et al. (2021). A 2-factor and a 3-factor model are proposed, and both
# are calculated here.
#
# NOTE(review): the 3-factor section below reuses and therefore OVERWRITES
# the objects created in the 2-factor section (pbi.m.scores, pbi.m.alpha,
# etc.). Run only the section you need, or rename the objects, before the
# final export at the bottom of the script.

library("psych")

# Load the original data frame that contains your raw data
pbiorg <- read.table(file = "pbi_rawdata.txt",
                     header = TRUE,
                     sep = "\t")

# Define all PBI items based on variable names (e.g. pbi.m.1, pbi.m.2 resp.
# pbi.f.1, pbi.f.2 etc.) - adjust according to your variable names
pbi.m.items <- paste0("pbi.m.", 1:25)
pbi.f.items <- paste0("pbi.f.", 1:25)

# Select the subset of PBI-dt items only
pbi.raw <- pbiorg[, c(pbi.m.items, pbi.f.items)]

# Split raw data to calculate scores for mother and father separately
pbi.m.raw <- pbi.raw[, pbi.m.items]
pbi.f.raw <- pbi.raw[, pbi.f.items]

### Correcting invalid values ###########
#########################################

# Inspect pbi raw data for 1) no. of columns (50), 2) no. of rows (N of
# sample) and 3) range of data (always 0 - 3)
str(pbi.raw)

# Inspect no. of invalid item scores; if output = integer(0) there are no
# invalid scores and the next step can be skipped
which((pbi.raw < 0) | (pbi.raw > 3))

# pbi.raw[pbi.raw < 0] <- NA  # first check data entry for mistakes!!
# pbi.raw[pbi.raw > 3] <- NA  # otherwise change invalid values to missing (NA)

# Inspect no. of NA items and decide on a correction strategy for the
# scoreItems() calls below (impute = "mean", "median" or "none")
table(is.na(pbi.raw))

### Reversing negative items ##########
#######################################
# In the PBI-dt of Benz et al. (2021), items 2, 3, 4, 7, 14, 15, 16, 18, 21,
# 22, 24, 25 are reverse-scored items.
# CAVE: there are other German versions of the PBI, some of which have a
# different item order; for the PBI-dt, item 1 is "...spoke to me in a warm
# and friendly voice" and item 25 is "...let me dress in any way I pleased".
rev.pbi.items <- c(2, 3, 4, 7, 14, 15, 16, 18, 21, 22, 24, 25)

# Reverse scoring: (min + max) - score, with the scale running 0 - 3
pbi.m.raw[, rev.pbi.items] <- (0 + 3) - pbi.m.raw[, rev.pbi.items]  # mother
pbi.f.raw[, rev.pbi.items] <- (0 + 3) - pbi.f.raw[, rev.pbi.items]  # father

### Calculation of Subscale Scores ####
#######################################
# In the PBI-dt you can decide between a 2-factor and a 3-factor solution;
# this script provides both options. Please choose the subscales that match
# your research question best.

### 2-Factor-Solution

# Create key list (identical for both parents)
key.list.pbi <- list(
  care           = c(1, 2, 4, 5, 6, 11, 12, 14, 16, 17, 18, 24),
  overprotection = c(3, 7, 8, 9, 10, 13, 15, 19, 20, 21, 22, 23, 25)
)

# PBI mother:
keys.pbi.m <- make.keys(pbi.m.raw, key.list.pbi)
# totals = TRUE gives total sum scores (average is the default); change
# impute for a different NA replacement ("mean", "median", "none")
pbi.m.score.res <- scoreItems(keys.pbi.m, pbi.m.raw,
                              totals = TRUE, missing = TRUE,
                              impute = "median", delete = FALSE)

# Data frame with both calculated subscale sum scores; can be added to any
# other data frame for further analysis
pbi.m.scores <- as.data.frame(pbi.m.score.res$scores)

# To save any further item analysis out of the scoreItems output use:
pbi.m.alpha <- pbi.m.score.res$alpha    # Cronbach's alpha, 1 row x 2 columns (one per subscale)
pbi.m.lambda <- pbi.m.score.res$G6      # Guttman's lambda 6 reliability, 1 row x 2 columns
pbi.m.itemscalecorr <- pbi.m.score.res$item.cor            # item-scale correlations, 25 rows (items) x 2 columns (subscales)
pbi.m.itemscalecorrcorrected <- pbi.m.score.res$item.corrected  # as above, corrected for overlapping items
pbi.m.subscalecorr <- pbi.m.score.res$cor        # inter-scale correlation matrix (2x2)
pbi.m.subcorrcorrected <- pbi.m.score.res$corrected  # below diagonal: scale correlations; diagonal: alpha; above: unattenuated correlations

# Classify different bonding styles on high vs. low care/overprotection
# (cut-offs according to Parker, Tupling, & Brown, 1979).
# Carefully decide about the cut-off -> < or <= 27 (mother) / 24 (father)
pbi.m.scores$pbi.m.type[pbi.m.scores$care <= 27 & pbi.m.scores$overprotection <= 13.5] <- 1  # 1 = NEG (low care, low overprotection)
pbi.m.scores$pbi.m.type[pbi.m.scores$care <= 27 & pbi.m.scores$overprotection > 13.5]  <- 2  # 2 = ALC (low care, high overprotection)
pbi.m.scores$pbi.m.type[pbi.m.scores$care > 27  & pbi.m.scores$overprotection <= 13.5] <- 3  # 3 = OP  (high care, low overprotection)
pbi.m.scores$pbi.m.type[pbi.m.scores$care > 27  & pbi.m.scores$overprotection > 13.5]  <- 4  # 4 = AC  (high care, high overprotection)

# PBI father:
keys.pbi.f <- make.keys(pbi.f.raw, key.list.pbi)
# missing/impute made explicit for consistency with the mother scoring
# (these are also psych's defaults, so behavior is unchanged)
pbi.f.score.res <- scoreItems(keys.pbi.f, pbi.f.raw,
                              totals = TRUE, missing = TRUE,
                              impute = "median", delete = FALSE)

# Data frame with both calculated subscale sum scores
pbi.f.scores <- as.data.frame(pbi.f.score.res$scores)

# To save any further item analysis out of the scoreItems output use:
# (NOTE: original comments claimed 6 columns / 39 rows - that was a
# copy-paste error; the PBI-dt 2-factor solution has 2 subscales, 25 items)
pbi.f.alpha <- pbi.f.score.res$alpha    # Cronbach's alpha, 1 row x 2 columns
pbi.f.lambda <- pbi.f.score.res$G6      # Guttman's lambda 6, 1 row x 2 columns
pbi.f.itemscalecorr <- pbi.f.score.res$item.cor            # item-scale correlations, 25 rows x 2 columns
pbi.f.itemscalecorrcorrected <- pbi.f.score.res$item.corrected  # corrected for overlapping items
pbi.f.subscalecorr <- pbi.f.score.res$cor        # inter-scale correlation matrix (2x2)
pbi.f.subcorrcorrected <- pbi.f.score.res$corrected  # below diagonal: scale correlations; diagonal: alpha; above: unattenuated correlations

# Classify different bonding styles on high vs. low care/overprotection:
pbi.f.scores$pbi.f.type[pbi.f.scores$care <= 24 & pbi.f.scores$overprotection <= 12.5] <- 1  # 1 = NEG (low care, low overprotection)
pbi.f.scores$pbi.f.type[pbi.f.scores$care <= 24 & pbi.f.scores$overprotection > 12.5]  <- 2  # 2 = ALC (low care, high overprotection)
pbi.f.scores$pbi.f.type[pbi.f.scores$care > 24  & pbi.f.scores$overprotection <= 12.5] <- 3  # 3 = OP  (high care, low overprotection)
pbi.f.scores$pbi.f.type[pbi.f.scores$care > 24  & pbi.f.scores$overprotection > 12.5]  <- 4  # 4 = AC  (high care, high overprotection)

# Additional classification: early life adversity (ELA) based on the bonding
# type of the mother (if another classification is needed, change the script
# accordingly)
pbi.m.scores$ela[pbi.m.scores$pbi.m.type < 3] <- 1  # 1 = with ELA, i.e. type is NEG or ALC
pbi.m.scores$ela[pbi.m.scores$pbi.m.type > 2] <- 0  # 0 = no ELA,   i.e. type is OP or AC

# Switch cases with too many missing items (> 3 of 25) to NA.
# Vectorized replacement for the original row-wise loop; rowSums(is.na(x))
# counts NA items per row exactly as length(which(is.na(x[i, ]))) did.
na.cases.m <- rowSums(is.na(pbi.m.raw)) > 3
pbi.m.scores$care[na.cases.m] <- NA
pbi.m.scores$overprotection[na.cases.m] <- NA
pbi.m.scores$pbi.m.type[na.cases.m] <- NA
pbi.m.scores$ela[na.cases.m] <- NA

na.cases.f <- rowSums(is.na(pbi.f.raw)) > 3
pbi.f.scores$care[na.cases.f] <- NA
pbi.f.scores$overprotection[na.cases.f] <- NA
pbi.f.scores$pbi.f.type[na.cases.f] <- NA

############################################################################################
### 3-Factor-Solution
# CAVE: this section overwrites the 2-factor objects created above.

# Create key list (identical for both parents)
key.list.pbi <- list(
  care     = c(1, 2, 4, 5, 6, 11, 12, 14, 16, 17, 18, 24),
  behavior = c(3, 7, 15, 21, 22, 25),        # "discouragement of behavioral freedom"
  autonomy = c(8, 9, 10, 13, 19, 20, 23)     # "denial of psychological autonomy"
)

# PBI mother:
keys.pbi.m <- make.keys(pbi.m.raw, key.list.pbi)
# totals = TRUE gives total sum scores (average is the default); change
# impute for a different NA replacement ("mean", "median", "none")
pbi.m.score.res <- scoreItems(keys.pbi.m, pbi.m.raw,
                              totals = TRUE, missing = TRUE,
                              impute = "median", delete = FALSE)

# Data frame with all three calculated subscale sum scores
pbi.m.scores <- as.data.frame(pbi.m.score.res$scores)

# To save any further item analysis out of the scoreItems output use:
pbi.m.alpha <- pbi.m.score.res$alpha    # Cronbach's alpha, 1 row x 3 columns (one per subscale)
pbi.m.lambda <- pbi.m.score.res$G6      # Guttman's lambda 6, 1 row x 3 columns
pbi.m.itemscalecorr <- pbi.m.score.res$item.cor            # item-scale correlations, 25 rows x 3 columns
pbi.m.itemscalecorrcorrected <- pbi.m.score.res$item.corrected  # corrected for overlapping items
pbi.m.subscalecorr <- pbi.m.score.res$cor        # inter-scale correlation matrix (3x3)
pbi.m.subcorrcorrected <- pbi.m.score.res$corrected  # below diagonal: scale correlations; diagonal: alpha; above: unattenuated correlations

# Cut-off scores or classification of different bonding styles are not yet
# available for the three-factor model.

# PBI father:
keys.pbi.f <- make.keys(pbi.f.raw, key.list.pbi)
pbi.f.score.res <- scoreItems(keys.pbi.f, pbi.f.raw,
                              totals = TRUE, missing = TRUE,
                              impute = "median", delete = FALSE)

# Data frame with all three calculated subscale sum scores
pbi.f.scores <- as.data.frame(pbi.f.score.res$scores)

# To save any further item analysis out of the scoreItems output use:
pbi.f.alpha <- pbi.f.score.res$alpha    # Cronbach's alpha, 1 row x 3 columns
pbi.f.lambda <- pbi.f.score.res$G6      # Guttman's lambda 6, 1 row x 3 columns
pbi.f.itemscalecorr <- pbi.f.score.res$item.cor            # item-scale correlations, 25 rows x 3 columns
pbi.f.itemscalecorrcorrected <- pbi.f.score.res$item.corrected  # corrected for overlapping items
pbi.f.subscalecorr <- pbi.f.score.res$cor        # inter-scale correlation matrix (3x3)
pbi.f.subcorrcorrected <- pbi.f.score.res$corrected  # below diagonal: scale correlations; diagonal: alpha; above: unattenuated correlations

# Switch cases with too many missing items (> 3 of 25) to NA (vectorized,
# see the 2-factor section above for the rationale)
na.cases.m <- rowSums(is.na(pbi.m.raw)) > 3
pbi.m.scores$care[na.cases.m] <- NA
pbi.m.scores$behavior[na.cases.m] <- NA
pbi.m.scores$autonomy[na.cases.m] <- NA

na.cases.f <- rowSums(is.na(pbi.f.raw)) > 3
pbi.f.scores$care[na.cases.f] <- NA
pbi.f.scores$behavior[na.cases.f] <- NA
pbi.f.scores$autonomy[na.cases.f] <- NA

############################################################
### Create and save table with all variables of interest ###
############################################################
# Decide which version and variables you need for your next steps and adjust
# the script accordingly, e.g.:

# BUG FIX: the original referenced the undefined object `pbi`; the ID column
# lives in `pbiorg` (adjust "ID" to your ID variable name).
# Consider adding a file extension (e.g. "pbi_scoredata.txt") to the output
# file names below.
pbi.finalscores <- cbind(ID = pbiorg$ID, pbi.m.scores, pbi.f.scores)
write.table(pbi.finalscores, file = "pbi_scoredata", sep = "\t")

pbi.itemresults <- cbind(t(pbi.m.alpha), t(pbi.m.lambda), pbi.m.subcorrcorrected,
                         t(pbi.f.alpha), t(pbi.f.lambda), pbi.f.subcorrcorrected)
write.table(pbi.itemresults, file = "pbi_itemresults", sep = "\t")