######################### STRparse Main (Python) #########################

import sys, os, argparse

globdir = os.path.dirname(os.path.realpath(__file__))


class STRparse(object):

    def __init__(self):
        parser = argparse.ArgumentParser(
            description='Tool for managing data generated by STR genotyping tools',
            usage='''STRparse <command> [<args>]

The available commands are:
   vcfparse       Collects relevant data from VCF files.
   summaries      Summarizes data generated by vcfparse.
   jsonparse      Collects coverage data from JSON files.
   filter         Filters reads with poor coverage.
   fullanalysis   Executes all STRparse commands.
''')
        parser.add_argument('command', help='Subcommand to run')
        # Parse only the subcommand; each subcommand parses its own arguments.
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command')
            parser.print_help()
            sys.exit(1)
        getattr(self, args.command)()

    def fullanalysis(self):
        global globdir
        sys.path.insert(1, globdir + "/Source/fullanalysis")
        parser = argparse.ArgumentParser(description='Run all STRparse functions (VCF & JSON parse, filter, and summarize).')
        parser.add_argument('-v', '--vcfinput', type=str, metavar="", required=True, help='VCF Input File Path')
        parser.add_argument('-j', '--jsoninput', type=str, metavar="", required=True, help='JSON Input File Path')
        parser.add_argument('-o', '--output', type=str, metavar="", required=True, help='Output Files Path')
        parser.add_argument('-s', '--savename', type=str, metavar="", required=True, help='Files Save-name')
        parser.add_argument('-b', '--gbuild', type=str, metavar="", required=True, help='Specify reference genome build in use (19, 37 or 38)')
        args = parser.parse_args(sys.argv[2:])
        from fullanalysis_main import main
        main(args.vcfinput, args.jsoninput, args.output, args.savename, args.gbuild, globdir)

    def vcfparse(self):
        global globdir
        sys.path.insert(1, globdir + "/Source/vcfparse")
        parser = argparse.ArgumentParser(description='Select VCF directory to be parsed.')
        parser.add_argument('-i', '--vcfinput', type=str, metavar="", required=True, help='VCF Input File Path')
        parser.add_argument('-o', '--output', type=str, metavar="", required=True, help='CSV Output File Path')
        parser.add_argument('-s', '--savename', type=str, metavar="", required=True, help='CSV File Name')
        args = parser.parse_args(sys.argv[2:])
        from vcfparse_main import main
        main(args.vcfinput, args.output, args.savename, globdir)

    def summaries(self):
        global globdir
        sys.path.insert(1, globdir + "/Source/summaries")
        parser = argparse.ArgumentParser(description='Summarise STR reads')
        parser.add_argument('-i', '--csvinput', type=str, metavar="", required=True, help='Input CSV file path')
        parser.add_argument('-o', '--summaryoutput', type=str, metavar="", required=True, help='Summary output file path')
        parser.add_argument('-s', '--savename', type=str, metavar="", required=True, help='Provide job identifier name')
        parser.add_argument('-b', '--gbuild', type=str, metavar="", required=True, help='Specify reference genome build in use (19, 37 or 38)')
        args = parser.parse_args(sys.argv[2:])
        from summaries_main import main
        main(args.csvinput, args.summaryoutput, args.savename, args.gbuild, globdir)

    def jsonparse(self):
        global globdir
        sys.path.insert(1, globdir + "/Source/jsonparse")
        parser = argparse.ArgumentParser(description='Select JSON directory to be parsed.')
        parser.add_argument('-i', '--jsoninput', type=str, metavar="", required=True, help='JSON Input File Path')
        parser.add_argument('-o', '--output', type=str, metavar="", required=True, help='CSV Output File Path')
        parser.add_argument('-s', '--savename', type=str, metavar="", required=True, help='CSV File Name')
        args = parser.parse_args(sys.argv[2:])
        from jsonparse_main import main
        main(args.jsoninput, args.output, args.savename, globdir)

    def filter(self):
        global globdir
        sys.path.insert(1, globdir + "/Source/filter")
        parser = argparse.ArgumentParser(description='Select VCF & JSON files to filter.')
        parser.add_argument('-v', '--vcfinput', type=str, metavar="", required=True, help='VCF Input File Path')
        parser.add_argument('-j', '--jsoninput', type=str, metavar="", required=True, help='JSON Input File Path')
        parser.add_argument('-o', '--output', type=str, metavar="", required=True, help='Output Files Path')
        parser.add_argument('-s', '--savename', type=str, metavar="", required=True, help='Files Save-name')
        args = parser.parse_args(sys.argv[2:])
        from filter_main import main
        main(args.vcfinput, args.jsoninput, args.output, args.savename, globdir)


if __name__ == '__main__':
    STRparse()
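For orientation, hypothetical invocations of the subcommands above (the entry-point filename STRparse.py and all paths and save-names are placeholders):

    python STRparse.py vcfparse -i /data/vcfs/ -o /data/out/ -s cohort1
    python STRparse.py jsonparse -i /data/jsons/ -o /data/out/ -s cohort1
    python STRparse.py fullanalysis -v /data/vcfs/ -j /data/jsons/ -o /data/out/ -s cohort1 -b 38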
######################### Full analysis Main (Python) #########################

# IMPORT LIBRARIES / MODULES
import sys

# MAIN FUNCTION
def main(vcfinput, jsoninput, output, savename, build, globdir):
    # CHECK FILE PATHS ARE COMPLETE
    if output[-1] != "/":
        output += "/"
    if vcfinput[-1] != "/":
        vcfinput += "/"
    if jsoninput[-1] != "/":
        jsoninput += "/"
    # PARSE VCF FILES
    sys.path.insert(1, globdir + "/Source/vcfparse")
    from vcfparse_main import main as run_vcfparse
    run_vcfparse(vcfinput, output, savename, globdir)
    # PARSE JSON FILES
    sys.path.insert(1, globdir + "/Source/jsonparse")
    from jsonparse_main import main as run_jsonparse
    run_jsonparse(jsoninput, output, savename, globdir)
    # FILTER READS
    sys.path.insert(1, globdir + "/Source/filter")
    from filter_main import main as run_filter
    reads_file_path = output + "CGG_Repeats_" + savename + ".csv"
    coverage_file_path = output + "ReadCoverage_" + savename + ".csv"
    run_filter(reads_file_path, coverage_file_path, output, savename, globdir)
    # RUN SUMMARY FUNCTIONS
    sys.path.insert(1, globdir + "/Source/summaries")
    from summaries_main import main as run_summaries
    reads_filtered_path = output + "CGG_Repeats_" + savename + "_filtered.csv"
    run_summaries(reads_filtered_path, output, savename, build, globdir)

######################### VCFparse Main and Functions (Python) #########################

# IMPORT LIBRARIES
import glob, pandas

# OUTPUT FUNCTION
def Output(vcf_data, output, savename):
    df = pandas.DataFrame(vcf_data)
    df.to_csv(output + "CGG_Repeats_" + savename + ".csv", sep='\t', index=False, header=True)
    print("Output CSV file can be found at: " + output + "CGG_Repeats_" + savename + ".csv")
    print("############### COMPLETE ###############")

# MAIN FUNCTION
def main(vcfinput, output, savename, globdir):
    # CHECK FILE PATHS
    if output[-1] != "/":
        output += "/"
    if vcfinput[-1] != "/":
        vcfinput += "/"
    # FETCH VCF FILES (search subdirectories first, then the top level)
    files = glob.glob(vcfinput + '**/*.vcf', recursive=True)
    if not files:
        files = glob.glob(vcfinput + '*.vcf')
    if not files:
        print("WARNING: \t No VCF files were found at the specified directory.")
        exit(1)
    # RUN VCF PARSE FUNCTION
    from From_ExpansionHunter import ExpansionHunter
    ExpansionHunter(files, output, savename)


import os, vcf  # PyVCF

def ExpansionHunter(vcffiles, output, savename):
    # IMPORT VCF FILES AND EXTRACT DESIRED DATA
    print("############### IMPORTING VCF DATA ###############")
    count = 0
    outfile = output + "CGG_Repeats_" + savename + ".csv"
    # WRITE THE TAB-SEPARATED HEADER
    with open(outfile, "w") as f:
        f.write('\t'.join(['Call', 'Sample_ID', 'Chr', 'Start', 'End', 'GT',
                           'Ref_Units', 'Allele1_Units', 'Allele2_Units']))
    for x in vcffiles:
        count += 1
        base = os.path.splitext(os.path.basename(x))[0]
        print("Processing VCF file: ", base, "\tFile no.: ", count)
        my_vcf = vcf.Reader(filename=x)
        with open(outfile, "a") as f:
            for record in my_vcf:
                for i in record.samples:
                    # SKIP UNCALLED GENOTYPES
                    if i['GT'] == ".":
                        continue
                    Call = record.INFO['REPID']
                    chrom = record.CHROM
                    start = int(record.POS)
                    end = int(record.INFO["END"])
                    units = i['REPCN']
                    gt = i['GT']
                    ref = round(record.INFO["REF"])
                    # SPLIT THE REPEAT-COUNT FIELD INTO THE TWO ALLELES
                    if '/' not in units:
                        Allele1_Units = units
                        Allele2_Units = 0
                    else:
                        Allele1_Units = units.split("/")[0]
                        Allele2_Units = units.split("/")[1]
                    strings = [str(Call), str(base), str(chrom), str(start), str(end),
                               str(gt), str(ref), str(Allele1_Units), str(Allele2_Units)]
                    f.write('\n')
                    f.write("\t".join(strings))
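For reference, the first rows of a CGG_Repeats_<savename>.csv might look like this (tab-separated; the locus IDs and all values here are hypothetical):

    Call	Sample_ID	Chr	Start	End	GT	Ref_Units	Allele1_Units	Allele2_Units
    X.147912050	sample01	chrX	147912050	147912109	0/1	20	20	35
    X.148500604	sample01	chrX	148500604	148500648	0/0	15	15	15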
######################### JSONparse Main and Functions (Python) #########################

# IMPORT MODULES
import json, glob, os

# DEFINE MAIN FUNCTION
def main(input_path, output_path, savename, globdir):
    # ENSURE CORRECT FILE PATH
    if output_path[-1] != "/":
        output_path += "/"
    if input_path[-1] != "/":
        input_path += "/"
    # COLLECT JSON FILES FROM SPECIFIED INPUT PATH (subdirectories first, then top level)
    files = glob.glob(input_path + '**/*.json', recursive=True)
    if not files:
        files = glob.glob(input_path + '*.json')
    if not files:
        print("WARNING: There are no .json files in this directory")
        exit(1)
    # INITIALIZE STORAGE FILE WITH A TAB-SEPARATED HEADER
    outfile = output_path + "ReadCoverage_" + savename + ".csv"
    with open(outfile, 'w') as out:
        out.write('\t'.join(["Sample_ID", "Call_ID", "MaxSpanningRead",
                             "MaxFlankingRead", "MaxInrepeatRead"]))
    count = 0
    # LOOP THROUGH ALL JSON FILES AT THE GIVEN DIRECTORY
    print("############### IMPORTING JSON DATA ###############")
    for path in files:
        with open(path) as json_file:
            data = json.load(json_file)["LocusResults"]
        base = str(os.path.splitext(os.path.basename(path))[0])
        count += 1
        print("Processing JSON file: ", base, "\tFile no.: ", count)
        # LOOP THROUGH EACH LOCUS WITHIN THE CURRENT JSON FILE
        with open(outfile, 'a') as out:
            for x in data:
                repeat = data[x]
                # ENSURE THAT THE READ WAS DETECTED AND DATA IS PRESENT
                if len(repeat) < 5:
                    continue
                # COLLECT THE COUNTS OF SPANNING, FLANKING, AND IN-REPEAT READS
                span = str(repeat['Variants'][x]['CountsOfSpanningReads'])
                flank = str(repeat['Variants'][x]['CountsOfFlankingReads'])
                inread = str(repeat['Variants'][x]['CountsOfInrepeatReads'])
                strings = [base, x, span, flank, inread]
                out.write('\n')
                out.write('\t'.join(strings))
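The fields read above follow the per-sample JSON layout produced by ExpansionHunter. A trimmed, hypothetical example of one locus entry (additional fields omitted; the len(repeat) < 5 guard skips loci whose entry is only a stub):

    {
      "LocusResults": {
        "FMR1": {
          "AlleleCount": 2,
          "Coverage": 33.9,
          "LocusId": "FMR1",
          "ReadLength": 150,
          "Variants": {
            "FMR1": {
              "CountsOfFlankingReads": "(1, 3), (2, 2)",
              "CountsOfInrepeatReads": "()",
              "CountsOfSpanningReads": "(20, 14), (29, 12)"
            }
          }
        }
      }
    }

Each count string holds (size, count) pairs as plain text; the filter step below parses these strings directly.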
######################### Filter Main and Functions (Python & R) #########################

import subprocess, sys

# MAIN FUNCTION
def main(reads, coverage, output, savename, globdir):
    # CHECK FILE PATHS
    if output[-1] != "/":
        output += "/"
    # RUN R FILTER SCRIPT (check_call raises if the script exits with an error)
    try:
        print("############### FILTERING OUT POOR COVERAGE READS ###############")
        subprocess.check_call(globdir + "/Source/filter/filter.R -d " + globdir
                              + " -r " + reads + " -c " + coverage
                              + " -o " + output + "CGG_Repeats_" + savename + "_filtered.csv",
                              shell=True)
        print("Filtered reads file can be found at: " + output + "CGG_Repeats_" + savename + "_filtered.csv")
    except Exception:
        sys.exit("Error: Failed to filter reads based on coverage")

# FILTER FUNCTION (R, from filter.R; requires the stringr package)
filter_by_coverage <- function(reads, coverage){
  # RENAME COLUMNS
  names(reads) <- c("Call_ID", "Sample_ID", "Chr", "Start", "End", "GT",
                    "Ref_Units", "Allele1_Units", "Allele2_Units")
  # MAKE JSON DATA ACCESSIBLE: strip "(", ")" and spaces, then keep the
  # maximum integer in each comma-separated list
  coverage$MaxFlankingRead <- str_replace_all(coverage$MaxFlankingRead, "\\(", "")
  coverage$MaxFlankingRead <- str_replace_all(coverage$MaxFlankingRead, "\\)", "")
  coverage$MaxFlankingRead <- str_replace_all(coverage$MaxFlankingRead, " ", "")
  coverage$MaxFlankingRead <- sapply(str_split(coverage$MaxFlankingRead, ','),
                                     function(v) max(as.integer(v)))
  # REMOVE NON / NA READS
  reads <- reads[complete.cases(reads), ]
  reads <- reads[!(reads$Allele1_Units == "." | reads$Allele2_Units == "."), ]
  # MERGE READ AND COVERAGE DATA FRAMES
  df2 <- merge(reads, coverage, by = c("Call_ID", "Sample_ID"))
  # DELETE NON-MERGED DATA FRAMES
  rm(reads, coverage)
  # FILTER OUT READS WITH POOR COVERAGE: no spanning or in-repeat reads and a
  # maximum flanking read below the reference repeat count
  df2 <- subset(
    df2,
    !(
      df2$MaxSpanningRead == "()" &
      df2$MaxInrepeatRead == "()" &
      df2$MaxFlankingRead < df2$Ref_Units
    ),
    select = c("Call_ID", "Sample_ID", "Chr", "Start", "End", "GT",
               "Ref_Units", "Allele1_Units", "Allele2_Units")
  )
  # RETURN FILTERED DATA FRAME
  return(df2)
}
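For readers more at home in Python, a minimal pandas sketch of the same coverage rule (illustrative only, not part of the pipeline; it assumes the reads table already carries the renamed columns from filter.R, that Ref_Units is numeric, and it counts empty "()" lists as 0 rather than NA):

    import re
    import pandas as pd

    def filter_by_coverage(reads: pd.DataFrame, coverage: pd.DataFrame) -> pd.DataFrame:
        # Keep a call unless it has no spanning and no in-repeat reads AND its
        # largest flanking-read number is below the reference repeat count.
        def max_int(s):
            nums = [int(n) for n in re.findall(r"\d+", s)]
            return max(nums) if nums else 0

        cov = coverage.copy()
        cov["MaxFlank"] = cov["MaxFlankingRead"].map(max_int)
        df = reads.merge(cov, on=["Call_ID", "Sample_ID"])
        poor = (
            (df["MaxSpanningRead"] == "()")
            & (df["MaxInrepeatRead"] == "()")
            & (df["MaxFlank"] < df["Ref_Units"])
        )
        return df.loc[~poor, reads.columns]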
######################### Summaries Main and Functions (Python & R) #########################

import subprocess, sys

# MAIN FUNCTION
def main(reads, output, savename, build, globdir):
    # CHECK FILE PATHS
    if output[-1] != "/":
        output += "/"
    # RUN R SUMMARY SCRIPT (check_call raises if the script exits with an error)
    try:
        print("############### SUMMARISING REPEAT DATA ###############")
        subprocess.check_call(globdir + "/Source/summaries/summaries.R -d " + globdir
                              + " -r " + reads + " -o " + output
                              + " -s " + savename + " -b " + build,
                              shell=True)
        print(savename + " summary files can be found in directory: " + output)
    except Exception:
        sys.exit("Error: Failed to summarize repeat data")

# PER-LOCUS SUMMARY (R, from summaries.R; requires reshape2 (melt) and plyr (revalue))
by_locus = function(df, output, savename, build){
  # Housekeeping: fill hemizygous calls and flag expansions over 50 / 100 units
  df$All1 <- ifelse(df$All1 == 0, df$All2, df$All1)
  df$All2 <- ifelse(df$All2 == 0, df$All1, df$All2)
  df$EE50 <- ifelse(df$All1 >= 50 | df$All2 >= 50, 1, 0)
  df$EE100 <- ifelse(df$All1 >= 100 | df$All2 >= 100, 1, 0)
  df$avealleles <- (df$All1 + df$All2) / 2
  # Summarize: per-locus median over both alleles
  mdata <- melt(df[c("Call_ID", "All1", "All2")], id = "Call_ID")
  medi <- aggregate(list("Median" = mdata$value), by = list("Call_ID" = mdata$Call_ID), median)
  medi <- medi[order(medi$Call_ID), ]
  df <- merge(df, medi, by = "Call_ID")
  # Score each call: 1 if both alleles deviate from the locus median,
  # 0.5 if one deviates, 0 if neither does
  df$stab <- ifelse(df$All1 != df$Median & df$All2 != df$Median, 1,
             ifelse(df$All1 == df$Median & df$All2 != df$Median, 0.5,
             ifelse(df$All1 != df$Median & df$All2 == df$Median, 0.5, 0)))
  ref <- aggregate(list("Ref_Units" = df$Ref_Units),
                   by = list("Call_ID" = df$Call_ID, "Chr" = df$Chr, "Start" = df$Start),
                   FUN = mean)
  avera <- aggregate(list("Mean" = df$avealleles), by = list("Call_ID" = df$Call_ID), FUN = mean)
  avera$Mean <- round(avera$Mean)
  counts <- aggregate(list("Count" = df$Call_ID), by = list("Call_ID" = df$Call_ID), FUN = length)
  stability <- aggregate(list("Stab" = df$stab), by = list("Call_ID" = df$Call_ID), FUN = mean)
  maxreps <- aggregate(list("All1" = df$All1, "All2" = df$All2), by = list("Call_ID" = df$Call_ID), FUN = max)
  maxreps$max <- pmax(maxreps$All1, maxreps$All2)
  minreps <- aggregate(list("All1" = df$All1, "All2" = df$All2), by = list("Call_ID" = df$Call_ID), FUN = min)
  minreps$min <- pmin(minreps$All1, minreps$All2)
  stddev <- aggregate((All1 + All2) ~ Call_ID, df, sd)
  ee50 <- aggregate(list("EE50" = df$EE50), by = list("Call_ID" = df$Call_ID), FUN = sum)
  ee100 <- aggregate(list("EE100" = df$EE100), by = list("Call_ID" = df$Call_ID), FUN = sum)
  rm(df)
  compile <- Reduce(function(x, y) merge(x, y, all = TRUE),
                    list(ref, avera, medi, minreps[c("Call_ID", "min")],
                         maxreps[c("Call_ID", "max")], counts, stability,
                         stddev, ee50, ee100))
  compile$Status <- ifelse(compile$Stab == 0, "STABLE", "POLYMORPHIC")
  # Annotate repeats with ANNOVAR
  anno_dir <- paste0(output, "/Annovar")
  dir.create(anno_dir, showWarnings = FALSE)
  anno_out <- paste0(anno_dir, "/", savename)
  anno <- compile[c("Chr", "Start")]
  anno$End <- (anno$Start) + (3 * compile$Ref_Units - 1)
  anno$RA <- 0
  anno$AA <- "-"
  write.table(anno, paste0(anno_out, "_Anno.csv"), quote = FALSE,
              col.names = FALSE, row.names = FALSE, sep = "\t")
  system(paste0("perl /home/dannear/Binaries/annovar/annotate_variation.pl -out ",
                anno_dir, "/", savename, "_Annovar -build ", paste0("hg", build), " ",
                anno_out, "_Anno.csv /home/dannear/Binaries/annovar/humandb"))
  genes <- read.csv(paste0(anno_out, "_Annovar.variant_function"), sep = '\t', header = FALSE)
  genes$V1 <- revalue(genes$V1, c("upstream;downstream" = "intergenic",
                                  "splicing" = "intronic",
                                  "ncRNA_exonic" = "ncRNA",
                                  "ncRNA_intronic" = "ncRNA"))
  genes$V2 <- gsub("\\s*\\([^\\)]+\\)", "", genes$V2)
  genes$V2 <- gsub(",", "->", genes$V2)
  genes$V2 <- gsub(";", "->", genes$V2)
  # Rebuild the Call_ID ("chr.start") to merge the annotation back in
  genes$V7 <- paste(genes$V3, genes$V4, sep = ".")
  genes$V7 <- gsub("chr", "", genes$V7)
  genes <- genes[c("V7", "V2", "V1")]
  names(genes) <- c("Call_ID", "Gene", "Region")
  compile <- merge(compile, genes, by = "Call_ID")
  names(compile) <- c("Call_ID", "Chr", "Start", "Ref_Units", "Mean_Units", "Med_Units",
                      "Min_Units", "Max_Units", "Hits", "Instability_Rating", "SD",
                      "EE50", "EE100", "Status", "Gene", "Region")
  compile$Instability_Rating <- round(compile$Instability_Rating, 3)
  compile$SD <- round(compile$SD, 3)
  write.table(compile, paste0(output, savename, "_by_locus.csv"), quote = FALSE,
              col.names = TRUE, row.names = FALSE, sep = "\t")
  return(compile)
}
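To make the stability score concrete: for a locus whose median is 20 units, a sample genotyped 20/20 scores 0, 20/23 scores 0.5, and 22/25 scores 1. The Instability_Rating reported per locus is the mean of these scores across samples, so 0 marks a locus that never deviates from its median, and the Status column flags anything above 0 as POLYMORPHIC.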
aggregate(list("Total_Reps"=locus$Chr), by = list("Chr"=locus$Chr), FUN = length) ave <- aggregate(list("Mean_Units"=(df$All1+df$All2)/2), by = list("Chr"=df$Chr), FUN = mean) med <- aggregate(list("Med_Units"=(df$All1+df$All2)/2), by = list("Chr"=df$Chr), FUN = median) status <- subset(locus, Status != "STABLE") instab <- aggregate(list("Polymorphic_Reps"=status$Status), by = list("Chr"=status$Chr), FUN = length) compile <- Reduce(function(x, y) merge(x, y, all=TRUE, by="Chr"), list(chrmbp, tot, ave, med, instab)) compiple <- compile[is.na(compile)] <- 0 compile$Mean_Units <- round(compile$Mean_Units) compile$Poly_Ratio <- round(compile$Polymorphic_Reps / compile$Total_Reps, 3) compile$Reps_per_Mbp <- round(compile$Total_Reps / compile$Mbp, 3) compile$Poly_per_Mbp <- round(compile$Polymorphic_Reps / compile$Mbp, 3) compile <- compile[order(match(compile$Chr, chrmbp$Chr)),] write.table(compile, paste0(output, "_by_chr.csv"), quote = FALSE, col.names = TRUE, row.names = FALSE, sep="\t") }