# gatk-3.8/R/Data.Processing.Report.r
# (308 lines, 18 KiB, R)
#Before executing this file, save squid files as csv, then as tab-delimited files with only the column values as the header, change the format of all cells to numbers. Assign the path to these files to "samples" and "lanes" respectively.
#TODO: make sure all font sizes readable
#put everything into one decent looking pdf
#set up database stuff for firehose and picard interface
#
# Build the GATK data-processing report: summary statistics printed to the
# console plus a set of QC plots written as PDF files in the working
# directory.
#
# args: character vector (or list) of up to 7 entries; use NA to skip a
# section:
#   [1] lanes       - path to tab-delimited per-lane Picard/Squid metrics
#                     (or an already-loaded data frame)
#   [2] samples     - path to tab-delimited per-sample metrics (or data frame)
#   [3] sample_sets - sample-set name used as the prefix of every output PDF;
#                     prompted for interactively when NA
#   [4] eval        - path to per-cycle error-rate table (or data frame)
#   [5] titveval    - path to a VariantEval Ti/Tv CSV (header on line 2)
#   [6] DOCi        - path to depth-of-coverage-by-interval table (or data frame)
#   [7] DOCs        - path to depth-of-coverage-by-sample table (or data frame)
#
# Returns nothing useful; called purely for its side effects (PDFs + text).
stuffmaker <- function(args) {
  lanes <- args[1]
  samples <- args[2]
  sample_sets <- args[3]
  eval <- args[4]
  titveval <- args[5]
  DOCi <- args[6]
  DOCs <- args[7]
  # The sample-set name is mandatory (it prefixes every PDF filename), so
  # prompt for it interactively when it was not passed in.
  if (is.na(sample_sets)) {
    print("Please specify sample set for file naming and press enter.")
    sample_sets <- scan("stdin", what = "character", n = 1)
    print("Thanks!")
  }
  if (is.na(lanes) == FALSE && is.na(samples) == FALSE) {
    # ---- Load Picard metrics tables -------------------------------------
    # Canonical column names for the per-lane table; defined once so the
    # file-path and data-frame input branches cannot drift apart (the
    # original duplicated this 79-element vector verbatim).
    lane_cols <- c('Initiative','Project','GSSR.ID','External.ID','WR.ID','Flowcell','Lane','Lane.Type','Library','AL_TOTAL_READS','AL_PF_READS','AL_PCT_PF_READS','AL_PF_NOISE_READS','AL_PF_READS_ALIGNED','AL_PCT_PF_READS_ALIGNED','AL_PF_HQ_ALIGNED_READS','AL_PF_HQ_ALIGNED_BASES','AL_PF_HQ_ALIGNED_Q20_BASES','AL_PF_HQ_MEDIAN_MISMATCHES','AL_MEAN_READ_LENGTH','AL_READS_ALIGNED_IN_PAIRS','AL_PCT_READS_ALIGNED_IN_PAIRS','AL_BAD_CYCLES','AL_PCT_STRAND_BALANCE','DUP_UNPAIRED_READS_EXAMINED','DUP_READ_PAIRS_EXAMINED','DUP_UNMAPPED_READS','DUP_UNPAIRED_READ_DUPLICATES','DUP_READ_PAIR_DUPLICATES','DUP_PERCENT_DUPLICATION','DUP_ESTIMATED_LIBRARY_SIZE','HS_BAIT_SET','HS_GENOME_SIZE','HS_LIBRARY_SIZE','HS_BAIT_TERRITORY','HS_TARGET_TERRITORY','HS_BAIT_DESIGN_EFFICIENCY','HS_TOTAL_READS','HS_PF_READS','HS_PF_UNIQUE_READS','HS_PCT_PF_READS','HS_PCT_PF_UQ_READS','HS_PCT_PF_UQ_READS_ALIGNED','HS_PF_UQ_READS_ALIGNED','HS_PF_UQ_BASES_ALIGNED','HS_ON_BAIT_BASES','HS_NEAR_BAIT_BASES','HS_OFF_BAIT_BASES','HS_ON_TARGET_BASES','HS_PCT_SELECTED_BASES','HS_PCT_OFF_BAIT','HS_ON_BAIT_VS_SELECTED','HS_MEAN_BAIT_COVERAGE','HS_MEAN_TARGET_COVERAGE','HS_FOLD_ENRICHMENT','HS_ZERO_CVG_TARGETS_PCT','HS_FOLD_80_BASE_PENALTY','HS_PCT_TARGET_BASES_2X','HS_PCT_TARGET_BASES_10X','HS_PCT_TARGET_BASES_20X','HS_PCT_TARGET_BASES_30X','HS_PENALTY_10X','HS_PENALTY_20X','HS_PENALTY_30X','SNP_TOTAL_SNPS','SNP_PCT_DBSNP','SNP_NUM_IN_DBSNP','Lane.IC.Matches','Lane.IC.PCT.Mean.RD1.Err.Rate','Lane.IC.PCT.Mean.RD2.Err.Rate','FP_PANEL_NAME','FP_PANEL_SNPS','FP_CONFIDENT_CALLS','FP_CONFIDENT_MATCHING_SNPS','FP_CONFIDENT_CALLED_PCT','FP_CONFIDENT_MATCHING_SNPS_PCT','LPCNCRD_REFERENCE','LPCNCRD_NON_REFERENCE','LPCNCRD_PCT_CONCORDANCE')
    if (typeof(lanes) == "character") {
      bylane <- read.delim(file = lanes, header = TRUE)
    } else {
      bylane <- lanes
    }
    colnames(bylane) <- lane_cols
    if (typeof(samples) == "character") {
      bysample <- read.delim(file = samples, header = TRUE)
    } else {
      bysample <- samples
    }

    # ---- Per-lane summary statistics ------------------------------------
    # NOTE(review): attach()/detach() is retained from the original to avoid
    # rewriting every column reference; new code should prefer with().
    attach(bylane)
    callable.target <- HS_TARGET_TERRITORY[1]
    singlelanes <- length(which(Lane.Type == "Single"))
    pairedlanes <- length(which(Lane.Type == "Paired"))
    mean.read.lane <- signif(mean(AL_TOTAL_READS, na.rm = TRUE))
    sd.read.lane <- signif(sd(AL_TOTAL_READS, na.rm = TRUE))
    mean.ub.lane <- signif(mean(HS_ON_TARGET_BASES, na.rm = TRUE))
    sd.ub.lane <- signif(sd(HS_ON_TARGET_BASES, na.rm = TRUE))
    mean.cov.lane <- round(mean(HS_MEAN_TARGET_COVERAGE, na.rm = TRUE))
    sd.cov.lane <- round(sd(HS_MEAN_TARGET_COVERAGE, na.rm = TRUE))
    mean.10x.lane <- round(mean(HS_PCT_TARGET_BASES_10X, na.rm = TRUE))
    mean.20x.lane <- round(mean(HS_PCT_TARGET_BASES_20X, na.rm = TRUE))
    mean.30x.lane <- round(mean(HS_PCT_TARGET_BASES_30X, na.rm = TRUE))
    sd.10x.lane <- round(sd(HS_PCT_TARGET_BASES_10X, na.rm = TRUE))
    sd.20x.lane <- round(sd(HS_PCT_TARGET_BASES_20X, na.rm = TRUE))
    sd.30x.lane <- round(sd(HS_PCT_TARGET_BASES_30X, na.rm = TRUE))
    names <- paste(Flowcell, "-", Lane, sep = "")

    # ---- Plot: SNPs called per lane, grouped by flowcell ----------------
    pdf(file = paste(sample_sets, "_SNPS.pdf", sep = ""),
        width = 0.2 * length(SNP_TOTAL_SNPS),
        height = 0.1 * length(SNP_TOTAL_SNPS))
    ticks <- c(match(unique(Flowcell), sort(Flowcell)))
    # Polygon y-coordinates for the alternating gray flowcell bands.
    ys <- rep(c(min(SNP_TOTAL_SNPS, na.rm = TRUE) * 0.96,
                max(SNP_TOTAL_SNPS, na.rm = TRUE) * 1.04,
                max(SNP_TOTAL_SNPS, na.rm = TRUE) * 1.04,
                min(SNP_TOTAL_SNPS, na.rm = TRUE) * 0.96,
                min(SNP_TOTAL_SNPS, na.rm = TRUE) * 0.96),
              ceiling(length(ticks) / 2))
    layout(matrix(c(1, 1, 2), 1, 3, byrow = FALSE), respect = TRUE)
    par(mar = c(10, 6, 3, 8))
    plot(1:length(SNP_TOTAL_SNPS), SNP_TOTAL_SNPS[order(Flowcell)],
         main = paste(sample_sets, ": SNPs Called in Each Lane sorted by Flowcell", sep = ""),
         xlab = "", ylab = "SNPs Called in Lane",
         ylim = c(min(SNP_TOTAL_SNPS, na.rm = TRUE), max(SNP_TOTAL_SNPS, na.rm = TRUE)),
         xaxt = "n", pch = NA, cex.main = 2, cex.axis = 1.25, cex.lab = 1.5)
    # BUG FIX: the original axis() call had an unbalanced ")" after "at=",
    # which made the whole file unparseable.
    axis(side = 1, at = c(1:length(Flowcell)), labels = Lane[order(Flowcell)],
         tick = FALSE, hadj = 1, cex.axis = 1.25)
    axis(side = 1, at = (ticks), labels = sort(unique(Flowcell)), tick = FALSE,
         vadj = 2, hadj = 1, cex.axis = 1.25, las = 2)
    # x-coordinates of the gray band edges (one band per alternate flowcell).
    shader <- ticks[c(rep(c(1, 1, 2, 2, 1), ceiling(length(ticks) / 2)) +
                        sort(rep(seq(0, length(ticks), by = 2), 5)))] - 0.5
    if ((length(ticks) %% 2 > 0)) {
      # Odd flowcell count: close the final band at the right plot edge.
      shader[(length(shader) - 2):(length(shader) - 1)] <- length(Flowcell) + 0.5
    }
    shader <- na.omit(shader)
    polygon(shader, ys, border = "black", lty = 0, col = "gray")
    # Boxplot outliers among the SNP counts are drawn in red.
    cols <- rep("blue", length(SNP_TOTAL_SNPS))
    cols[which(SNP_TOTAL_SNPS %in% boxplot.stats(SNP_TOTAL_SNPS)$out)] <- "red"
    points(1:length(SNP_TOTAL_SNPS), SNP_TOTAL_SNPS, col = cols, pch = 19)
    if (length(boxplot.stats(SNP_TOTAL_SNPS)$out) > 0) {
      legend("bottomright",
             legend = c("Normal SNP Call Counts", "Outlier SNP Call Counts"),
             pch = 19, col = c("Blue", "red"), bg = "White")
    }
    boxplot(SNP_TOTAL_SNPS, main = "SNPs Called in Lane", ylab = "SNPs Called",
            cex.axis = 1.25)
    if (length(boxplot.stats(SNP_TOTAL_SNPS)$out) == 0) {
      mtext("No outliers", side = 1, line = 4)
    } else {
      mtext(paste("Outlier SNP call counts in ",
                  length(boxplot.stats(SNP_TOTAL_SNPS)$out), "lanes"),
            side = 1, line = 4)
    }
    dev.off()

    # ---- Plot: fingerprint calling/matching per lane --------------------
    # BUG FIX: the original union()ed the identical condition
    # (FP_CONFIDENT_MATCHING_SNPS<15) twice; a lane is "bad" when it has few
    # confident calls OR few confident matching calls.
    badsnps <- union(which(FP_CONFIDENT_CALLS < 15),
                     which(FP_CONFIDENT_MATCHING_SNPS < 15))
    colors <- c(rep("Blue", length(FP_CONFIDENT_CALLS)))
    colors[badsnps] <- "Red"
    ticks <- c(match(unique(Flowcell), Flowcell))
    # Bands span the full 0..24 fingerprint-panel range here.
    ys <- rep(c(0, 24 * 1.04, 24 * 1.04, 0, 0), ceiling(length(ticks) / 2))
    pdf(file = paste(sample_sets, "_Fingerprints.pdf", sep = ""),
        width = .2 * length(FP_CONFIDENT_CALLS),
        height = .1 * length(FP_CONFIDENT_CALLS))
    par(mar = c(10, 6, 8, 3))
    plot(1:length(FP_CONFIDENT_MATCHING_SNPS), FP_CONFIDENT_MATCHING_SNPS,
         pch = NA, ylim = c(0, 24), ylab = "Fingerprint calls", xlab = "",
         xaxt = "n", col = colors,
         main = "Fingerprint Calling and Matching Sorted by lane",
         cex.main = 3, cex.lab = 2)
    axis(side = 1, at = (ticks + 1), labels = unique(Flowcell), tick = FALSE,
         hadj = 1, cex.axis = 1.25, las = 2)
    shader <- ticks[c(rep(c(1, 1, 2, 2, 1), ceiling(length(ticks) / 2)) +
                        sort(rep(seq(0, length(ticks), by = 2), 5)))] - 0.5
    shader <- na.omit(shader)
    if ((length(ticks) %% 2 > 0)) {
      shader[(length(shader) - 2):(length(shader) - 1)] <- length(Flowcell) + 0.5
    }
    polygon(shader, ys, border = "black", lty = 0, col = "gray")
    points(1:length(FP_CONFIDENT_MATCHING_SNPS), FP_CONFIDENT_MATCHING_SNPS,
           pch = 4, col = colors)
    points(1:length(FP_CONFIDENT_MATCHING_SNPS), FP_CONFIDENT_CALLS,
           pch = 3, col = colors)
    if (length(badsnps) > 0) {
      legend("bottomright",
             legend = c("Confident calls at fingerprint sites by lane",
                        "Confident matching calls at fingerprint sites by lane",
                        "Confident calls in bad lanes",
                        "Confident matching calls in bad lanes",
                        "All Confident calls match fingerprint sites"),
             pch = c(4, 3, 4, 3, 8),
             col = c("Blue", "Blue", "Red", "Red", "Black"), bg = "White")
      mtext("Some problematic fingerprint sites", side = 3)
    } else {
      legend("bottomright",
             legend = c("Confident calls at fingerprint sites by lane",
                        "Confident matching calls at fingerprint sites by lane",
                        "All Confident calls match fingerprint sites"),
             pch = c(4, 3, 8), col = c("Blue", "Blue", "Black"), bg = "White")
    }
    dev.off()
    detach(bylane)

    # ---- Per-sample summary statistics ----------------------------------
    # Column names below are the ones read.delim() derives from the sample
    # file header (e.g. "X..Lanes.included.in.aggregation").
    attach(bysample)
    mean.lanes.samp <- signif(mean(X..Lanes.included.in.aggregation, na.rm = TRUE))
    sd.lanes.samp <- signif(sd(X..Lanes.included.in.aggregation, na.rm = TRUE))
    mean.mrl.samp <- signif(mean(Mean.Read.Length, na.rm = TRUE))
    sd.mrl.samp <- signif(sd(Mean.Read.Length, na.rm = TRUE))
    mean.read.samp <- signif(mean(Total.Reads, na.rm = TRUE))
    sd.read.samp <- signif(sd(Total.Reads, na.rm = TRUE))
    mean.ub.samp <- signif(mean(On.Target.Bases..HS., na.rm = TRUE))
    sd.ub.samp <- signif(sd(On.Target.Bases..HS., na.rm = TRUE))
    mean.cov.samp <- round(mean(Mean.Target.Coverage..HS., na.rm = TRUE))
    sd.cov.samp <- round(sd(Mean.Target.Coverage..HS., na.rm = TRUE))
    mean.10x.samp <- round(mean(PCT.Target.Bases.10x..HS., na.rm = TRUE))
    mean.20x.samp <- round(mean(PCT.Target.Bases.20x..HS., na.rm = TRUE))
    mean.30x.samp <- round(mean(PCT.Target.Bases.30x..HS., na.rm = TRUE))
    sd.10x.samp <- round(sd(PCT.Target.Bases.10x..HS., na.rm = TRUE))
    sd.20x.samp <- round(sd(PCT.Target.Bases.20x..HS., na.rm = TRUE))
    sd.30x.samp <- round(sd(PCT.Target.Bases.30x..HS., na.rm = TRUE))
    detach(bysample)

    # ---- Console summary (typos "Legnths" and a missing "%" fixed) ------
    print(paste("Callable Target: ", callable.target, " bases", sep = ""), quote = FALSE)
    print(paste("Used Lanes per Sample: ", mean.lanes.samp, " +/- ", sd.lanes.samp, sep = ""), quote = FALSE)
    print(paste("Parities: ", singlelanes, " single lanes, ", pairedlanes, " paired lanes", sep = ""), quote = FALSE)
    print(paste("Read Lengths: ", mean.mrl.samp, " bp +/- ", sd.mrl.samp, " bp", sep = ""), quote = FALSE)
    print(paste("Reads per lane: ", round(mean.read.lane / 10^6, 1), "M +/- ", round(sd.read.lane / 10^6, 1), "M", sep = ""), quote = FALSE)
    print(paste("Reads per sample: ", round(mean.read.samp / 10^9, 1), "B +/- ", round(sd.read.samp / 10^9, 1), "B", sep = ""), quote = FALSE)
    print(paste("Used bases per lane: ", mean.ub.lane, " +/- ", sd.ub.lane, sep = ""), quote = FALSE)
    print(paste("Used bases per sample: ", mean.ub.samp, " +/- ", sd.ub.samp, sep = ""), quote = FALSE)
    print(paste("Average target coverage per lane: ", mean.cov.lane, "x +/- ", sd.cov.lane, "x", sep = ""), quote = FALSE)
    print(paste("Average target coverage per sample: ", mean.cov.samp, "x +/- ", sd.cov.samp, "x", sep = ""), quote = FALSE)
    print(paste("% loci covered to 10x per lane: ", mean.10x.lane, "% +/- ", sd.10x.lane, "%", sep = ""), quote = FALSE)
    print(paste("% loci covered to 10x per sample: ", mean.10x.samp, "% +/- ", sd.10x.samp, "%", sep = ""), quote = FALSE)
    print(paste("% loci covered to 20x per lane: ", mean.20x.lane, "% +/- ", sd.20x.lane, "%", sep = ""), quote = FALSE)
    print(paste("% loci covered to 20x per sample: ", mean.20x.samp, "% +/- ", sd.20x.samp, "%", sep = ""), quote = FALSE)
    print(paste("% loci covered to 30x per lane: ", mean.30x.lane, "% +/- ", sd.30x.lane, "%", sep = ""), quote = FALSE)
    print(paste("% loci covered to 30x per sample: ", mean.30x.samp, "% +/- ", sd.30x.samp, "%", sep = ""), quote = FALSE)
  } else {
    print("Lane and Sample metrics file paths not provided")
  }

  # ---- Plot: error rate per cycle ---------------------------------------
  if (is.na(eval) == FALSE) {
    if (typeof(eval) == "character") {
      # Read the file once (the original parsed it twice) and drop the
      # first column, which is not a lane series.
      rawerr <- read.delim(eval, header = TRUE)
      errpercycle <- rawerr[2:ncol(rawerr)]
    } else {
      errpercycle <- eval
    }
    pdf(paste(sample_sets, "_errorrate_per_cycle.pdf", sep = ""), width = 6, height = 5)
    # Lanes whose error rate at cycle 75 exceeds 0.3 are highlighted; this
    # can be changed to any kind of filter for particular lanes.
    crazies <- which(errpercycle[75, ] > 0.3)
    colors <- rainbow(ncol(errpercycle), s = 0.5, v = 0.5)
    colors[crazies] <- rainbow(length(crazies))
    weights <- rep(1, ncol(errpercycle))
    weights[crazies] <- 2
    matplot(errpercycle, type = "l", lty = "solid", col = colors, lwd = weights,
            main = "Error Rate per Cycle", ylab = "Error Rate", xlab = "Cycle",
            ylim = c(0, 0.7))
    if (length(crazies) > 0) {
      legend("topleft", title = "Unusual Lanes",
             legend = colnames(errpercycle)[crazies], lty = "solid", lwd = 2,
             col = colors[crazies], xjust = 0.5)
    } else {
      legend("topleft", legend = "No unusual lanes.", bty = "n")
    }
    dev.off()
  } else {
    print("Error Rate Per Cycle file paths not provided")
  }

  # ---- Plot: Ti/Tv for known vs novel calls -----------------------------
  if (is.na(titveval) == FALSE) {
    titv <- read.csv(file = titveval, skip = 1)
    attach(titv)
    pdf(file = paste(sample_sets, "_TI-TV.pdf", sep = ""),
        width = 0.2 * length(unique(sample)),
        height = 0.175 * length(unique(sample)))
    par(mar = c(11, 4, 4, 2))
    plot(seq(1:length(unique(sample))),
         Ti.Tv[which(novelty_name == "novel" & filter_name == "called")],
         xaxt = "n", ylim = c(1, 4),
         main = "Ti/Tv for Novel and Known SNP calls", ylab = "Ti/Tv",
         xlab = "", col = "red", pch = 1)
    points(seq(1:length(unique(sample))),
           Ti.Tv[which(novelty_name == "known" & filter_name == "called")],
           pch = 1, col = "blue")
    axis(side = 1, at = (1:length(unique(sample))), labels = unique(sample),
         tick = FALSE, hadj = 1, cex.axis = 1, las = 2)
    abline(a = mean(Ti.Tv[which(novelty_name == "all" & filter_name == "called")]), b = 0)
    legend("bottomright",
           legend = c("Known Variants", "Novel Variants", "Mean Ti/Tv for all variants"),
           col = c("blue", "red", "black"), pch = c(1, 1, NA_integer_),
           lty = c(0, 0, 1), xjust = 0.5)
    mtext(line = 9, "Lower Ti/Tv ratios indicate potentially increased false positive SNP rates.", side = 1)
    dev.off()
    # BUG FIX: the original never detached titv, leaving it on the search
    # path for the rest of the session.
    detach(titv)
  } else {
    print("TiTV filepath not provided")
  }

  # ---- Plot: depth of coverage by interval ------------------------------
  if (is.na(DOCi) == FALSE) {
    # BUG FIX: the original wrote "sample_set" (undefined) in the pdf()
    # calls and stored the data-frame input in "DOCdata" while all
    # downstream code read "DOC".
    pdf(paste(sample_sets, "_DOCi.pdf", sep = ""), width = 6, height = 5)
    if (typeof(DOCi) == "character") {
      DOC <- as.data.frame(read.delim(DOCi))
    } else {
      DOC <- DOCi
    }
    cols <- colnames(DOC)
    # Per-target median and quartiles of the per-sample mean coverages
    # ("mean" columns only).
    medianofmeans <- apply(DOC[, grep("mean", cols)], 1, median)
    q3s <- apply(DOC[, grep("mean", cols)], 1, quantile, probs = 3/4)
    q1s <- apply(DOC[, grep("mean", cols)], 1, quantile, probs = 1/4)
    # BUG FIX: the original hard-coded 3122 targets; derive from the data.
    ntargets <- length(medianofmeans)
    par(ylog = FALSE, mar = c(5, 4, 4, 2))
    plot(c(1:ntargets), sort(medianofmeans, decreasing = TRUE), type = "l",
         lwd = "1", log = "y", ylab = "Coverage",
         xlab = "Targets sorted by median average coverage across sample",
         xaxt = "n", main = "Coverage Across All Targets")
    abline(h = 15, lty = "dotted")
    lines(c(1:ntargets), q3s[order(medianofmeans, decreasing = TRUE)])
    lines(c(1:ntargets), q1s[order(medianofmeans, decreasing = TRUE)])
    legend("bottomleft", "15x coverage", box.lty = 0, lty = "dotted")
    dev.off()
    pdf(paste(sample_sets, "_DOCiy.pdf", sep = ""), width = 6, height = 5)
    yuck <- DOC[which(medianofmeans < 15), grep("mean", cols)]
    # Offset by 0.1 so zero-coverage targets survive the log-scale axis.
    yuck <- yuck + 0.1
    par(mar = c(16, 4, 4, 2))
    boxplot(t(yuck[order(medianofmeans[which(medianofmeans < 15)], decreasing = TRUE), ]),
            log = "y", yaxt = "n", xaxt = "n",
            ylab = "Average coverage across all samples",
            main = "Targets with low coverage across samples")
    axis(2, at = axTicks(2) + c(0, rep(0.1, length(axTicks(2)) - 1)),
         labels = c(0.0, axTicks(2)[2:length(axTicks(2))]))
    mtext("Target", side = 1, line = 14)
    # BUG FIX: labels are now sorted decreasing=TRUE to match the row order
    # used in the boxplot above (the original sorted ascending, mislabeling
    # every target).
    axis(1, at = c(1:length(which(medianofmeans < 15))),
         labels = DOC[which(medianofmeans < 15), 1][order(medianofmeans[which(medianofmeans < 15)], decreasing = TRUE)],
         las = 2)
    dev.off()
  } else {
    print("Depth of Coverage--intervals filepath not provided")
  }

  # ---- Plot: depth of coverage by sample --------------------------------
  if (is.na(DOCs) == FALSE) {
    # BUG FIX: "sample_set" -> "sample_sets"; "DOCdata"/"DOC2" -> "DOC".
    pdf(paste(sample_sets, "_DOCs.pdf", sep = ""), width = 6, height = 5)
    if (typeof(DOCs) == "character") {
      DOC <- as.data.frame(read.delim(DOCs))
    } else {
      DOC <- DOCs
    }
    par(mar = c(10, 4, 4, 2))
    # Column 1 holds the sample names; the remaining columns are coverage
    # values, offset by 0.1 for the log-scale axis.
    boxplot(t(DOC[, 2:ncol(DOC)] + 0.1), log = "y",
            main = "Depth of Coverage by Sample", xaxt = "n", yaxt = "n",
            ylab = "Coverage")
    axis(1, at = c(1:nrow(DOC)), labels = DOC[, 1], las = 2)
    axis(2, at = axTicks(2) + c(0, rep(0.1, length(axTicks(2)) - 1)),
         labels = floor(c(0.0, axTicks(2)[2:length(axTicks(2))])))
    mtext("Samples", side = 1, line = 9)
    dev.off()
  } else {
    print("Depth of Coverage--samples filepath not provided")
  }
}
# Command-line entry point: forward any trailing arguments ("Rscript
# file.r arg1 ...") to stuffmaker(). Sourcing the file with no arguments
# only defines the function.
cli_args <- commandArgs(TRUE)
if (length(cli_args) > 0) {
  stuffmaker(cli_args)
}