Bringing Reduce Reads bug fixes to the main repository

This commit is contained in:
Mauricio Carneiro 2011-09-07 12:25:53 -04:00
commit 131cb7effd
38 changed files with 443 additions and 213 deletions

View File

@ -12,7 +12,9 @@ if ( onCMDLine ) {
inputFileName = args[1]
outputPDF = args[2]
} else {
inputFileName = "~/Desktop/broadLocal/GATK/unstable/report.txt"
#inputFileName = "~/Desktop/broadLocal/GATK/unstable/report.txt"
inputFileName = "/humgen/gsa-hpprojects/dev/depristo/oneOffProjects/Q-25718@node1149.jobreport.txt"
#inputFileName = "/humgen/gsa-hpprojects/dev/depristo/oneOffProjects/rodPerformanceGoals/history/report.082711.txt"
outputPDF = NA
}
@ -113,11 +115,22 @@ plotGroup <- function(groupTable) {
textplot(as.data.frame(sum), show.rownames=F)
title(paste("Job summary for", name, "itemizing each iteration"), cex=3)
# histogram of job times by groupAnnotations
if ( length(groupAnnotations) == 1 && dim(sub)[1] > 1 ) {
# todo -- how do we group by annotations?
p <- ggplot(data=sub, aes(x=runtime)) + geom_histogram()
p <- p + xlab("runtime in seconds") + ylab("No. of jobs")
p <- p + opts(title=paste("Job runtime histogram for", name))
print(p)
}
# as above, but averaging over all iterations
groupAnnotationsNoIteration = setdiff(groupAnnotations, "iteration")
sum = cast(melt(sub, id.vars=groupAnnotationsNoIteration, measure.vars=c("runtime")), ... ~ ., fun.aggregate=c(mean, sd))
textplot(as.data.frame(sum), show.rownames=F)
title(paste("Job summary for", name, "averaging over all iterations"), cex=3)
if ( dim(sub)[1] > 1 ) {
sum = cast(melt(sub, id.vars=groupAnnotationsNoIteration, measure.vars=c("runtime")), ... ~ ., fun.aggregate=c(mean, sd))
textplot(as.data.frame(sum), show.rownames=F)
title(paste("Job summary for", name, "averaging over all iterations"), cex=3)
}
}
# print out some useful basic information
@ -146,7 +159,7 @@ plotJobsGantt(gatkReportData, T)
plotJobsGantt(gatkReportData, F)
plotProgressByTime(gatkReportData)
for ( group in gatkReportData ) {
plotGroup(group)
plotGroup(group)
}
if ( ! is.na(outputPDF) ) {

View File

@ -3,7 +3,7 @@ package org.broadinstitute.sting.gatk.refdata;
import net.sf.samtools.util.SequenceUtil;
import org.broad.tribble.Feature;
import org.broad.tribble.annotation.Strand;
import org.broad.tribble.dbsnp.DbSNPFeature;
import org.broad.tribble.dbsnp.OldDbSNPFeature;
import org.broad.tribble.gelitext.GeliTextFeature;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.utils.classloader.PluginManager;
@ -93,27 +93,27 @@ public class VariantContextAdaptors {
// --------------------------------------------------------------------------------------------------------------
private static class DBSnpAdaptor implements VCAdaptor {
private static boolean isSNP(DbSNPFeature feature) {
private static boolean isSNP(OldDbSNPFeature feature) {
return feature.getVariantType().contains("single") && feature.getLocationType().contains("exact");
}
private static boolean isMNP(DbSNPFeature feature) {
private static boolean isMNP(OldDbSNPFeature feature) {
return feature.getVariantType().contains("mnp") && feature.getLocationType().contains("range");
}
private static boolean isInsertion(DbSNPFeature feature) {
private static boolean isInsertion(OldDbSNPFeature feature) {
return feature.getVariantType().contains("insertion");
}
private static boolean isDeletion(DbSNPFeature feature) {
private static boolean isDeletion(OldDbSNPFeature feature) {
return feature.getVariantType().contains("deletion");
}
private static boolean isIndel(DbSNPFeature feature) {
private static boolean isIndel(OldDbSNPFeature feature) {
return isInsertion(feature) || isDeletion(feature) || isComplexIndel(feature);
}
public static boolean isComplexIndel(DbSNPFeature feature) {
public static boolean isComplexIndel(OldDbSNPFeature feature) {
return feature.getVariantType().contains("in-del");
}
@ -125,7 +125,7 @@ public class VariantContextAdaptors {
*
* @return an alternate allele list
*/
public static List<String> getAlternateAlleleList(DbSNPFeature feature) {
public static List<String> getAlternateAlleleList(OldDbSNPFeature feature) {
List<String> ret = new ArrayList<String>();
for (String allele : getAlleleList(feature))
if (!allele.equals(String.valueOf(feature.getNCBIRefBase()))) ret.add(allele);
@ -139,7 +139,7 @@ public class VariantContextAdaptors {
*
* @return an alternate allele list
*/
public static List<String> getAlleleList(DbSNPFeature feature) {
public static List<String> getAlleleList(OldDbSNPFeature feature) {
List<String> alleleList = new ArrayList<String>();
// add ref first
if ( feature.getStrand() == Strand.POSITIVE )
@ -156,14 +156,14 @@ public class VariantContextAdaptors {
/**
* Converts non-VCF formatted dbSNP records to VariantContext.
* @return DbSNPFeature.
* @return OldDbSNPFeature.
*/
@Override
public Class<? extends Feature> getAdaptableFeatureType() { return DbSNPFeature.class; }
public Class<? extends Feature> getAdaptableFeatureType() { return OldDbSNPFeature.class; }
@Override
public VariantContext convert(String name, Object input, ReferenceContext ref) {
DbSNPFeature dbsnp = (DbSNPFeature)input;
OldDbSNPFeature dbsnp = (OldDbSNPFeature)input;
if ( ! Allele.acceptableAlleleBases(dbsnp.getNCBIRefBase()) )
return null;
Allele refAllele = Allele.create(dbsnp.getNCBIRefBase(), true);

View File

@ -26,8 +26,11 @@ package org.broadinstitute.sting.gatk.refdata.tracks;
import net.sf.samtools.SAMSequenceDictionary;
import net.sf.samtools.util.CloseableIterator;
import org.apache.log4j.Logger;
import org.broad.tribble.Feature;
import org.broad.tribble.FeatureCodec;
import org.broad.tribble.FeatureSource;
import org.broad.tribble.iterators.CloseableTribbleIterator;
import org.broad.tribble.source.PerformanceLoggingFeatureSource;
import org.broadinstitute.sting.gatk.refdata.utils.FeatureToGATKFeatureIterator;
import org.broadinstitute.sting.gatk.refdata.utils.GATKFeature;
import org.broadinstitute.sting.utils.GenomeLoc;
@ -47,7 +50,6 @@ import java.io.IOException;
*/
public class RMDTrack {
private final static Logger logger = Logger.getLogger(RMDTrackBuilder.class);
private final static boolean DEBUG = false;
// the basics of a track:
private final Class type; // our type
@ -113,8 +115,10 @@ public class RMDTrack {
}
public CloseableIterator<GATKFeature> query(GenomeLoc interval) throws IOException {
if ( DEBUG ) logger.debug("Issuing query for %s: " + interval);
return new FeatureToGATKFeatureIterator(genomeLocParser, reader.query(interval.getContig(),interval.getStart(),interval.getStop()), this.getName());
CloseableTribbleIterator<Feature> iter = reader.query(interval.getContig(),interval.getStart(),interval.getStop());
if ( RMDTrackBuilder.MEASURE_TRIBBLE_QUERY_PERFORMANCE )
logger.warn("Query " + getName() + ":" + ((PerformanceLoggingFeatureSource)reader).getPerformanceLog());
return new FeatureToGATKFeatureIterator(genomeLocParser, iter, this.getName());
}
public void close() {

View File

@ -27,20 +27,21 @@ package org.broadinstitute.sting.gatk.refdata.tracks;
import net.sf.samtools.SAMSequenceDictionary;
import net.sf.samtools.SAMSequenceRecord;
import org.apache.log4j.Logger;
import org.broad.tribble.*;
import org.broad.tribble.FeatureCodec;
import org.broad.tribble.FeatureSource;
import org.broad.tribble.Tribble;
import org.broad.tribble.TribbleException;
import org.broad.tribble.index.Index;
import org.broad.tribble.index.IndexFactory;
import org.broad.tribble.source.BasicFeatureSource;
import org.broad.tribble.source.PerformanceLoggingFeatureSource;
import org.broad.tribble.util.LittleEndianOutputStream;
import org.broadinstitute.sting.commandline.Tags;
import org.broadinstitute.sting.gatk.arguments.ValidationExclusion;
import org.broadinstitute.sting.gatk.refdata.ReferenceDependentFeatureCodec;
import org.broadinstitute.sting.gatk.refdata.utils.RMDTriplet;
import org.broadinstitute.sting.gatk.refdata.utils.RMDTriplet.RMDStorageType;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.SequenceDictionaryUtils;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.classloader.PluginManager;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.exceptions.UserException;
@ -51,7 +52,10 @@ import org.broadinstitute.sting.utils.instrumentation.Sizeof;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.*;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
@ -70,6 +74,7 @@ public class RMDTrackBuilder { // extends PluginManager<FeatureCodec> {
* our log, which we use to capture anything from this class
*/
private final static Logger logger = Logger.getLogger(RMDTrackBuilder.class);
public final static boolean MEASURE_TRIBBLE_QUERY_PERFORMANCE = false;
// a constant we use for marking sequence dictionary entries in the Tribble index property list
public static final String SequenceDictionaryPropertyPredicate = "DICT:";
@ -214,7 +219,10 @@ public class RMDTrackBuilder { // extends PluginManager<FeatureCodec> {
sequenceDictionary = getSequenceDictionaryFromProperties(index);
}
featureSource = new BasicFeatureSource(inputFile.getAbsolutePath(), index, createCodec(descriptor, name));
if ( MEASURE_TRIBBLE_QUERY_PERFORMANCE )
featureSource = new PerformanceLoggingFeatureSource(inputFile.getAbsolutePath(), index, createCodec(descriptor, name));
else
featureSource = new BasicFeatureSource(inputFile.getAbsolutePath(), index, createCodec(descriptor, name));
}
catch (TribbleException e) {
throw new UserException(e.getMessage());

View File

@ -105,10 +105,19 @@ public class DepthOfCoverageWalker extends LocusWalker<Map<DoCOutputType.Partiti
@Multiplex(value=DoCOutputMultiplexer.class,arguments={"partitionTypes","refSeqGeneList","omitDepthOutput","omitIntervals","omitSampleSummary","omitLocusTable"})
Map<DoCOutputType,PrintStream> out;
/**
* Sets the low-coverage cutoff for granular binning. All loci with depth < START are counted in the first bin.
*/
@Argument(fullName = "start", doc = "Starting (left endpoint) for granular binning", required = false)
int start = 1;
/**
* Sets the high-coverage cutoff for granular binning. All loci with depth > END are counted in the last bin.
*/
@Argument(fullName = "stop", doc = "Ending (right endpoint) for granular binning", required = false)
int stop = 500;
/**
* Sets the number of bins for granular binning
*/
@Argument(fullName = "nBins", doc = "Number of bins to use for granular binning", required = false)
int nBins = 499;
@Argument(fullName = "minMappingQuality", shortName = "mmq", doc = "Minimum mapping quality of reads to count towards depth. Defaults to -1.", required = false)
@ -119,28 +128,59 @@ public class DepthOfCoverageWalker extends LocusWalker<Map<DoCOutputType.Partiti
byte minBaseQuality = -1;
@Argument(fullName = "maxBaseQuality", doc = "Maximum quality of bases to count towards depth. Defaults to 127 (Byte.MAX_VALUE).", required = false)
byte maxBaseQuality = Byte.MAX_VALUE;
/**
* Instead of reporting depth, report the base pileup at each locus
*/
@Argument(fullName = "printBaseCounts", shortName = "baseCounts", doc = "Will add base counts to per-locus output.", required = false)
boolean printBaseCounts = false;
/**
* Do not tabulate locus statistics (# loci covered by sample by coverage)
*/
@Argument(fullName = "omitLocusTable", shortName = "omitLocusTable", doc = "Will not calculate the per-sample per-depth counts of loci, which should result in speedup", required = false)
boolean omitLocusTable = false;
/**
* Do not tabulate interval statistics (mean, median, quartiles AND # intervals by sample by coverage)
*/
@Argument(fullName = "omitIntervalStatistics", shortName = "omitIntervals", doc = "Will omit the per-interval statistics section, which should result in speedup", required = false)
boolean omitIntervals = false;
/**
* Do not print the total coverage at every base
*/
@Argument(fullName = "omitDepthOutputAtEachBase", shortName = "omitBaseOutput", doc = "Will omit the output of the depth of coverage at each base, which should result in speedup", required = false)
boolean omitDepthOutput = false;
@Argument(fullName = "printBinEndpointsAndExit", doc = "Prints the bin values and exits immediately. Use to calibrate what bins you want before running on data.", required = false)
boolean printBinEndpointsAndExit = false;
/**
* Do not tabulate the sample summary statistics (total, mean, median, quartile coverage per sample)
*/
@Argument(fullName = "omitPerSampleStats", shortName = "omitSampleSummary", doc = "Omits the summary files per-sample. These statistics are still calculated, so this argument will not improve runtime.", required = false)
boolean omitSampleSummary = false;
/**
* A way of partitioning reads into groups. Can be sample, readgroup, or library.
*/
@Argument(fullName = "partitionType", shortName = "pt", doc = "Partition type for depth of coverage. Defaults to sample. Can be any combination of sample, readgroup, library.", required = false)
Set<DoCOutputType.Partition> partitionTypes = EnumSet.of(DoCOutputType.Partition.sample);
/**
* Consider a spanning deletion as contributing to coverage. Also enables deletion counts in per-base output.
*/
@Argument(fullName = "includeDeletions", shortName = "dels", doc = "Include information on deletions", required = false)
boolean includeDeletions = false;
@Argument(fullName = "ignoreDeletionSites", doc = "Ignore sites consisting only of deletions", required = false)
boolean ignoreDeletionSites = false;
/**
* Path to the RefSeq file for use in aggregating coverage statistics over genes
*/
@Argument(fullName = "calculateCoverageOverGenes", shortName = "geneList", doc = "Calculate the coverage statistics over this list of genes. Currently accepts RefSeq.", required = false)
File refSeqGeneList = null;
/**
* The format of the output file
*/
@Argument(fullName = "outputFormat", doc = "the format of the output file (e.g. csv, table, rtable); defaults to r-readable table", required = false)
String outputFormat = "rtable";
/**
* A coverage threshold for summarizing (e.g. % bases >= CT for each sample)
*/
@Argument(fullName = "summaryCoverageThreshold", shortName = "ct", doc = "for summary file outputs, report the % of bases coverd to >= this number. Defaults to 15; can take multiple arguments.", required = false)
int[] coverageThresholds = {15};
@ -963,4 +1003,4 @@ class CoveragePartitioner {
public Map<DoCOutputType.Partition,List<String>> getIdentifiersByType() {
return identifiersByType;
}
}
}

View File

@ -0,0 +1,101 @@
package org.broadinstitute.sting.gatk.walkers.diagnostics;
import net.sf.samtools.SAMReadGroupRecord;
import net.sf.samtools.SAMRecord;
import org.broadinstitute.sting.commandline.Output;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.ReadMetaDataTracker;
import org.broadinstitute.sting.gatk.report.GATKReport;
import org.broadinstitute.sting.gatk.report.GATKReportTable;
import org.broadinstitute.sting.gatk.walkers.ReadWalker;
import java.io.PrintStream;
import java.util.List;
/**
 * Outputs the distribution of read lengths for all the reads in a file.
 *
 * <p>
 * Generates a table of read-length counts categorized per sample. If the file has no sample information
 * (no read groups) it considers all reads to come from the same sample.
 * </p>
 *
 *
 * <h2>Input</h2>
 * <p>
 * A BAM file.
 * </p>
 *
 * <h2>Output</h2>
 * <p>
 * A human/R readable table of tab separated values with one column per sample and one row per observed read length.
 * </p>
 *
 * <h2>Examples</h2>
 * <pre>
 * java
 * -jar GenomeAnalysisTK.jar
 * -T ReadLengthDistribution
 * -I example.bam
 * -R reference.fasta
 * -o example.tbl
 * </pre>
 *
 * @author Kiran Garimella
 */
public class ReadLengthDistribution extends ReadWalker<Integer, Integer> {
    @Output
    public PrintStream out;

    /** Name of the single GATKReport table this walker builds. */
    private static final String TABLE_NAME = "ReadLengthDistribution";

    /** Column name used when the input BAM carries no read-group information. */
    private static final String NO_READ_GROUP_COLUMN = "SINGLE_SAMPLE";

    // Accumulates per-sample read-length counts across map() calls; printed in onTraversalDone().
    private GATKReport report;

    /**
     * Sets up the report table: primary key is the read length, with one count
     * column per sample (or a single SINGLE_SAMPLE column when the header has
     * no read groups).
     */
    public void initialize() {
        report = new GATKReport();
        report.addTable(TABLE_NAME, "Table of read length distributions");
        GATKReportTable table = report.getTable(TABLE_NAME);
        table.addPrimaryKey("readLength");

        List<SAMReadGroupRecord> readGroups = getToolkit().getSAMFileHeader().getReadGroups();

        if (readGroups.isEmpty())
            table.addColumn(NO_READ_GROUP_COLUMN, 0);
        else
            for (SAMReadGroupRecord rg : readGroups)
                table.addColumn(rg.getSample(), 0);
    }

    /**
     * Counts unpaired reads and the first read of each pair, so paired data is
     * not double-counted.
     */
    public boolean filter(ReferenceContext ref, SAMRecord read) {
        // Equivalent to the original (!p || p && f) with the redundant conjunct dropped.
        return !read.getReadPairedFlag() || read.getFirstOfPairFlag();
    }

    @Override
    public Integer map(ReferenceContext referenceContext, SAMRecord samRecord, ReadMetaDataTracker readMetaDataTracker) {
        GATKReportTable table = report.getTable(TABLE_NAME);
        int length = Math.abs(samRecord.getReadLength());

        // BUG FIX: getReadGroup() returns null for reads with no RG tag, which
        // previously caused an NPE here even though initialize() explicitly
        // supports read-group-less BAMs via the SINGLE_SAMPLE column.
        SAMReadGroupRecord rg = samRecord.getReadGroup();
        String sample = (rg == null) ? NO_READ_GROUP_COLUMN : rg.getSample();

        table.increment(length, sample);

        return null;
    }

    @Override
    public Integer reduceInit() {
        return null;
    }

    @Override
    public Integer reduce(Integer integer, Integer integer1) {
        return null;
    }

    /** Emits the accumulated read-length table to the output stream. */
    public void onTraversalDone(Integer sum) {
        report.print(out);
    }
}

View File

@ -52,6 +52,11 @@ public class UnifiedArgumentCollection {
@Argument(fullName = "heterozygosity", shortName = "hets", doc = "Heterozygosity value used to compute prior likelihoods for any locus", required = false)
public Double heterozygosity = DiploidSNPGenotypePriors.HUMAN_HETEROZYGOSITY;
/**
* The PCR error rate is independent of the sequencing error rate, which is necessary because we cannot necessarily
* distinguish between PCR errors vs. sequencing errors. The practical implication for this value is that it
* effectively acts as a cap on the base qualities.
*/
@Argument(fullName = "pcr_error_rate", shortName = "pcr_error", doc = "The PCR error rate to be used for computing fragment-based likelihoods", required = false)
public Double PCR_error = DiploidSNPGenotypeLikelihoods.DEFAULT_PCR_ERROR_RATE;

View File

@ -77,6 +77,11 @@ import java.util.*;
* if even a weak evidence for the same indel, not necessarily a confident call, exists in the first sample ("Normal"), or as somatic
* if first bam has coverage at the site but no indication for an indel. In the --somatic mode, BED output contains
* only somatic calls, while --verbose output contains all calls annotated with GERMLINE/SOMATIC keywords.
*
* <b>If any of the general usage of this tool or any of the command-line arguments for this tool are not clear to you,
* please email asivache at broadinstitute dot org and he will gladly explain everything in more detail.</b>
*
*
*/
@ReadFilters({Platform454Filter.class, MappingQualityZeroFilter.class, PlatformUnitFilter.class})
public class SomaticIndelDetectorWalker extends ReadWalker<Integer,Integer> {

View File

@ -253,6 +253,13 @@ public class GenotypeAndValidateWalker extends RodWalker<GenotypeAndValidateWalk
@Argument(fullName ="sample", shortName ="sn", doc="Name of the sample to validate (in case your VCF/BAM has more than one sample)", required=false)
private String sample = "";
/**
* Print out discordance sites to standard out.
*/
@Hidden
@Argument(fullName ="print_interesting_sites", shortName ="print_interesting", doc="Print out interesting sites to standard out", required=false)
private boolean printInterestingSites;
private UnifiedGenotyperEngine snpEngine;
private UnifiedGenotyperEngine indelEngine;
@ -301,7 +308,12 @@ public class GenotypeAndValidateWalker extends RodWalker<GenotypeAndValidateWalk
UnifiedArgumentCollection uac = new UnifiedArgumentCollection();
uac.OutputMode = UnifiedGenotyperEngine.OUTPUT_MODE.EMIT_ALL_SITES;
uac.alleles = alleles;
if (!bamIsTruth) uac.GenotypingMode = GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES;
// TODO -- if we change this tool to actually validate against the called allele, then this if statement is needed;
// TODO -- for now, though, we need to be able to validate the right allele (because we only test isVariant below) [EB]
//if (!bamIsTruth)
uac.GenotypingMode = GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES;
if (mbq >= 0) uac.MIN_BASE_QUALTY_SCORE = mbq;
if (deletions >= 0)
uac.MAX_DELETION_FRACTION = deletions;
@ -371,19 +383,26 @@ public class GenotypeAndValidateWalker extends RodWalker<GenotypeAndValidateWalk
if (call.isVariant()) {
if (vcComp.isVariant())
counter.nAltCalledAlt = 1L; // todo -- may wanna check if the alts called are the same?
else
else {
counter.nAltCalledRef = 1L;
if ( printInterestingSites )
System.out.println("Truth=ALT Call=REF at " + call.getChr() + ":" + call.getStart());
}
}
// If truth is a confident ALT call
else {
if (vcComp.isVariant())
if (vcComp.isVariant()) {
counter.nRefCalledAlt = 1L;
else
if ( printInterestingSites )
System.out.println("Truth=REF Call=ALT at " + call.getChr() + ":" + call.getStart());
} else
counter.nRefCalledRef = 1L;
}
}
else {
counter.nNotConfidentCalls = 1L;
if ( printInterestingSites )
System.out.println("Truth is not confident at " + call.getChr() + ":" + call.getStart());
writeVariant = false;
}
}
@ -396,17 +415,24 @@ public class GenotypeAndValidateWalker extends RodWalker<GenotypeAndValidateWalk
if (call.isCalledAlt(callConf)) {
if (vcComp.getAttribute("GV").equals("T"))
counter.nAltCalledAlt = 1L;
else
else {
counter.nRefCalledAlt = 1L;
if ( printInterestingSites )
System.out.println("Truth=REF Call=ALT at " + call.getChr() + ":" + call.getStart());
}
}
else if (call.isCalledRef(callConf)) {
if (vcComp.getAttribute("GV").equals("T"))
if (vcComp.getAttribute("GV").equals("T")) {
counter.nAltCalledRef = 1L;
else
if ( printInterestingSites )
System.out.println("Truth=ALT Call=REF at " + call.getChr() + ":" + call.getStart());
} else
counter.nRefCalledRef = 1L;
}
else {
counter.nNotConfidentCalls = 1L;
if ( printInterestingSites )
System.out.println("Truth is not confident at " + call.getChr() + ":" + call.getStart());
writeVariant = false;
}
}

View File

@ -93,20 +93,30 @@ import java.util.List;
*/
@Requires(value={DataSource.REFERENCE})
public class ValidationAmplicons extends RodWalker<Integer,Integer> {
/**
* A Table-formatted file listing amplicon contig, start, stop, and a name for the amplicon (or probe)
*/
@Input(fullName = "ProbeIntervals", doc="A collection of intervals in table format with optional names that represent the "+
"intervals surrounding the probe sites amplicons should be designed for", required=true)
RodBinding<TableFeature> probeIntervals;
/**
* A VCF file containing the bi-allelic sites for validation. Filtered records will prompt a warning, and will be flagged as filtered in the output fastq.
*/
@Input(fullName = "ValidateAlleles", doc="A VCF containing the sites and alleles you want to validate. Restricted to *BI-Allelic* sites", required=true)
RodBinding<VariantContext> validateAlleles;
/**
* A VCF file containing variants to be masked. A mask variant overlapping a validation site will be ignored at the validation site.
*/
@Input(fullName = "MaskAlleles", doc="A VCF containing the sites you want to MASK from the designed amplicon (e.g. by Ns or lower-cased bases)", required=true)
RodBinding<VariantContext> maskAlleles;
@Argument(doc="Lower case SNPs rather than replacing with 'N'",fullName="lowerCaseSNPs",required=false)
boolean lowerCaseSNPs = false;
/**
* BWA single-end alignment is used as a primer specificity proxy. Low-complexity regions (that don't align back to themselves as a best hit) are lowercased.
* This changes the size of the k-mer used for alignment.
*/
@Argument(doc="Size of the virtual primer to use for lower-casing regions with low specificity",fullName="virtualPrimerSize",required=false)
int virtualPrimerSize = 20;

View File

@ -122,9 +122,6 @@ public class VariantEvalWalker extends RodWalker<Integer, Integer> implements Tr
@Argument(fullName="doNotUseAllStandardStratifications", shortName="noST", doc="Do not use the standard stratification modules by default (instead, only those that are specified with the -S option)", required=false)
protected Boolean NO_STANDARD_STRATIFICATIONS = false;
@Argument(fullName="onlyVariantsOfType", shortName="VT", doc="If provided, only variants of these types will be considered during the evaluation, in ", required=false)
protected Set<VariantContext.Type> typesToUse = null;
/**
* See the -list argument to view available modules.
*/
@ -317,9 +314,9 @@ public class VariantEvalWalker extends RodWalker<Integer, Integer> implements Tr
// find the comp
final VariantContext comp = findMatchingComp(eval, compSet);
HashMap<VariantStratifier, ArrayList<String>> stateMap = new HashMap<VariantStratifier, ArrayList<String>>();
HashMap<VariantStratifier, List<String>> stateMap = new HashMap<VariantStratifier, List<String>>();
for ( VariantStratifier vs : stratificationObjects ) {
ArrayList<String> states = vs.getRelevantStates(ref, tracker, comp, compRod.getName(), eval, evalRod.getName(), sampleName);
List<String> states = vs.getRelevantStates(ref, tracker, comp, compRod.getName(), eval, evalRod.getName(), sampleName);
stateMap.put(vs, states);
}

View File

@ -10,10 +10,13 @@ import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Stratifies the eval RODs by the allele count of the alternate allele
*
* Looks at the AC value in the INFO field, and uses that value if present. If absent,
* computes the AC from the genotypes themselves. For no AC can be computed, 0 is used.
*/
public class AlleleCount extends VariantStratifier {
// needs to know the variant context
private ArrayList<String> states = new ArrayList<String>();
@Override
public void initialize() {
List<RodBinding<VariantContext>> evals = getVariantEvalWalker().getEvals();
@ -35,11 +38,7 @@ public class AlleleCount extends VariantStratifier {
getVariantEvalWalker().getLogger().info("AlleleCount using " + nchrom + " chromosomes");
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>(1);
if (eval != null) {

View File

@ -6,11 +6,15 @@ import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Stratifies the eval RODs by the allele frequency of the alternate allele
*
* Uses a constant 0.005 frequency grid, and projects the AF INFO field value. Requires
* that AF be present in every ROD, otherwise this stratification throws an exception
*/
public class AlleleFrequency extends VariantStratifier {
// needs to know the variant context
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
@ -19,11 +23,7 @@ public class AlleleFrequency extends VariantStratifier {
}
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
if (eval != null) {

View File

@ -6,22 +6,21 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Required stratification grouping output by each comp ROD
*/
public class CompRod extends VariantStratifier implements RequiredStratification {
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
for ( RodBinding<VariantContext> rod : getVariantEvalWalker().getComps() )
states.add(rod.getName());
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add(compName);

View File

@ -5,23 +5,19 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Stratifies the evaluation by each contig in the reference sequence
*/
public class Contig extends VariantStratifier {
// needs to know the variant context
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
states.addAll(getVariantEvalWalker().getContigNames());
states.add("all");
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
if (eval != null) {

View File

@ -5,6 +5,7 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* CpG is a stratification module for VariantEval that divides the input data by within/not within a CpG site
@ -19,21 +20,14 @@ import java.util.ArrayList;
* A CpG site is defined as a site where the reference base at a locus is a C and the adjacent reference base in the 3' direction is a G.
*/
public class CpG extends VariantStratifier {
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
states.add("all");
states.add("CpG");
states.add("non_CpG");
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
boolean isCpG = false;
if (ref != null && ref.getBases() != null) {
String fwRefBases = new String(ref.getBases());

View File

@ -7,10 +7,12 @@ import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
/**
* Experimental stratification by the degeneracy of an amino acid, according to VCF annotation. Not safe
*/
public class Degeneracy extends VariantStratifier {
private ArrayList<String> states;
private HashMap<String, HashMap<Integer, String>> degeneracies;
@Override
@ -77,11 +79,7 @@ public class Degeneracy extends VariantStratifier {
}
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add("all");

View File

@ -6,10 +6,12 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Required stratification grouping output by each eval ROD
*/
public class EvalRod extends VariantStratifier implements RequiredStratification {
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
@ -17,11 +19,7 @@ public class EvalRod extends VariantStratifier implements RequiredStratification
states.add(rod.getName());
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add(evalName);

View File

@ -5,24 +5,20 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Stratifies by the FILTER status (PASS, FAIL) of the eval records
*/
public class Filter extends VariantStratifier {
// needs to know the variant context
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
states.add("called");
states.add("filtered");
states.add("raw");
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add("raw");

View File

@ -5,25 +5,22 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.List;
/**
* Stratifies by nonsense, missense, silent, and all annotations in the input ROD, from the INFO field annotation.
*/
public class FunctionalClass extends VariantStratifier {
// needs to know the variant context
private ArrayList<String> states;
@Override
public void initialize() {
states = new ArrayList<String>();
states.add("all");
states.add("silent");
states.add("missense");
states.add("nonsense");
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add("all");

View File

@ -6,30 +6,30 @@ import org.broadinstitute.sting.gatk.walkers.varianteval.util.SortableJexlVCMatc
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import org.broadinstitute.sting.utils.variantcontext.VariantContextUtils;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
/**
* Stratifies the eval RODs by user-supplied JEXL expressions
*
* See http://www.broadinstitute.org/gsa/wiki/index.php/Using_JEXL_expressions for more details
*/
public class JexlExpression extends VariantStratifier implements StandardStratification {
// needs to know the jexl expressions
private Set<SortableJexlVCMatchExp> jexlExpressions;
private ArrayList<String> states;
@Override
public void initialize() {
jexlExpressions = getVariantEvalWalker().getJexlExpressions();
states = new ArrayList<String>();
states.add("none");
for ( SortableJexlVCMatchExp jexlExpression : jexlExpressions ) {
states.add(jexlExpression.name);
}
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add("none");

View File

@ -7,32 +7,31 @@ import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.*;
/**
* Stratifies by whether a site in in the list of known RODs (e.g., dbsnp by default)
*/
public class Novelty extends VariantStratifier implements StandardStratification {
// needs the variant contexts and known names
private List<RodBinding<VariantContext>> knowns;
final private ArrayList<String> states = new ArrayList<String>(Arrays.asList("all", "known", "novel"));
@Override
public void initialize() {
states = new ArrayList<String>(Arrays.asList("all", "known", "novel"));
knowns = getVariantEvalWalker().getKnowns();
}
public ArrayList<String> getAllStates() {
return states;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
if (tracker != null && eval != null) {
final Collection<VariantContext> knownComps = tracker.getValues(knowns, ref.getLocus());
for ( final VariantContext c : knownComps ) {
// loop over sites, looking for something that matches the type eval
if ( eval.getType() == c.getType() ) {
return new ArrayList<String>(Arrays.asList("all", "known"));
return Arrays.asList("all", "known");
}
}
}
return new ArrayList<String>(Arrays.asList("all", "novel"));
return Arrays.asList("all", "novel");
}
}

View File

@ -4,26 +4,23 @@ import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Stratifies the eval RODs by each sample in the eval ROD.
*
* This allows the system to analyze each sample separately. Since many evaluations
* only consider non-reference sites, stratifying by sample results in meaningful
* calculations for CompOverlap
*/
public class Sample extends VariantStratifier {
// needs the sample names
private ArrayList<String> samples;
@Override
public void initialize() {
samples = new ArrayList<String>();
samples.addAll(getVariantEvalWalker().getSampleNamesForStratification());
states.addAll(getVariantEvalWalker().getSampleNamesForStratification());
}
public ArrayList<String> getAllStates() {
return samples;
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
ArrayList<String> relevantStates = new ArrayList<String>();
relevantStates.add(sampleName);
return relevantStates;
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
return Arrays.asList(sampleName);
}
}

View File

@ -6,9 +6,12 @@ import org.broadinstitute.sting.gatk.walkers.varianteval.VariantEvalWalker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public abstract class VariantStratifier implements Comparable {
private VariantEvalWalker variantEvalWalker;
protected ArrayList<String> states = new ArrayList<String>();
/**
* @return a reference to the parent VariantEvalWalker running this stratification
@ -27,15 +30,15 @@ public abstract class VariantStratifier implements Comparable {
public abstract void initialize();
public ArrayList<String> getAllStates() {
return new ArrayList<String>();
}
public ArrayList<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
return null;
}
public int compareTo(Object o1) {
return this.getClass().getSimpleName().compareTo(o1.getClass().getSimpleName());
}
public ArrayList<String> getAllStates() {
return states;
}
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2011, The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.gatk.walkers.varianteval.stratifications;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Stratifies the eval variants by their type (SNP, INDEL, ETC)
*/
// Stratification module that buckets eval variants by their VariantContext.Type
// (e.g. SNP, INDEL) — one state per type value.
public class VariantType extends VariantStratifier {
// Populates the inherited 'states' list with one entry per VariantContext.Type value.
@Override
public void initialize() {
for ( VariantContext.Type t : VariantContext.Type.values() ) {
states.add(t.toString());
}
}
// Returns the single state matching the eval variant's type, or an empty list
// when there is no eval variant at this site.
public List<String> getRelevantStates(ReferenceContext ref, RefMetaDataTracker tracker, VariantContext comp, String compName, VariantContext eval, String evalName, String sampleName) {
return eval == null ? Collections.<String>emptyList() : Arrays.asList(eval.getType().toString());
}
}

View File

@ -266,10 +266,7 @@ public class VariantEvalUtils {
* @return a new VariantContext with just the requested sample
*/
public VariantContext getSubsetOfVariantContext(VariantContext vc, String sampleName) {
ArrayList<String> sampleNames = new ArrayList<String>();
sampleNames.add(sampleName);
return getSubsetOfVariantContext(vc, sampleNames);
return getSubsetOfVariantContext(vc, Arrays.asList(sampleName));
}
/**
@ -280,7 +277,7 @@ public class VariantEvalUtils {
* @return a new VariantContext with just the requested samples
*/
public VariantContext getSubsetOfVariantContext(VariantContext vc, Collection<String> sampleNames) {
VariantContext vcsub = vc.subContextFromGenotypes(vc.getGenotypes(sampleNames).values(), vc.getAlleles());
VariantContext vcsub = vc.subContextFromGenotypes(vc.getGenotypes(sampleNames).values());
HashMap<String, Object> newAts = new HashMap<String, Object>(vcsub.getAttributes());
@ -371,12 +368,12 @@ public class VariantEvalUtils {
* @param stateKeys all the state keys
* @return a list of state keys
*/
public ArrayList<StateKey> initializeStateKeys(HashMap<VariantStratifier, ArrayList<String>> stateMap, Stack<HashMap<VariantStratifier, ArrayList<String>>> stateStack, StateKey stateKey, ArrayList<StateKey> stateKeys) {
public ArrayList<StateKey> initializeStateKeys(HashMap<VariantStratifier, List<String>> stateMap, Stack<HashMap<VariantStratifier, List<String>>> stateStack, StateKey stateKey, ArrayList<StateKey> stateKeys) {
if (stateStack == null) {
stateStack = new Stack<HashMap<VariantStratifier, ArrayList<String>>>();
stateStack = new Stack<HashMap<VariantStratifier, List<String>>>();
for (VariantStratifier vs : stateMap.keySet()) {
HashMap<VariantStratifier, ArrayList<String>> oneSetOfStates = new HashMap<VariantStratifier, ArrayList<String>>();
HashMap<VariantStratifier, List<String>> oneSetOfStates = new HashMap<VariantStratifier, List<String>>();
oneSetOfStates.put(vs, stateMap.get(vs));
stateStack.add(oneSetOfStates);
@ -384,10 +381,10 @@ public class VariantEvalUtils {
}
if (!stateStack.isEmpty()) {
Stack<HashMap<VariantStratifier, ArrayList<String>>> newStateStack = new Stack<HashMap<VariantStratifier, ArrayList<String>>>();
Stack<HashMap<VariantStratifier, List<String>>> newStateStack = new Stack<HashMap<VariantStratifier, List<String>>>();
newStateStack.addAll(stateStack);
HashMap<VariantStratifier, ArrayList<String>> oneSetOfStates = newStateStack.pop();
HashMap<VariantStratifier, List<String>> oneSetOfStates = newStateStack.pop();
VariantStratifier vs = oneSetOfStates.keySet().iterator().next();
for (String state : oneSetOfStates.get(vs)) {

View File

@ -214,7 +214,7 @@ public class SelectVariants extends RodWalker<Integer, Integer> {
@Argument(fullName="sample_expressions", shortName="se", doc="Regular expression to select many samples from the ROD tracks provided. Can be specified multiple times", required=false)
public Set<String> sampleExpressions ;
@Argument(fullName="sample_file", shortName="sf", doc="File containing a list of samples (one per line) to include. Can be specified multiple times", required=false)
@Input(fullName="sample_file", shortName="sf", doc="File containing a list of samples (one per line) to include. Can be specified multiple times", required=false)
public Set<File> sampleFiles;
/**
@ -226,7 +226,7 @@ public class SelectVariants extends RodWalker<Integer, Integer> {
/**
* Note that sample exclusion takes precedence over inclusion, so that if a sample is in both lists it will be excluded.
*/
@Argument(fullName="exclude_sample_file", shortName="xl_sf", doc="File containing a list of samples (one per line) to exclude. Can be specified multiple times", required=false)
@Input(fullName="exclude_sample_file", shortName="xl_sf", doc="File containing a list of samples (one per line) to exclude. Can be specified multiple times", required=false)
public Set<File> XLsampleFiles = new HashSet<File>(0);
/**

View File

@ -26,7 +26,6 @@
package org.broadinstitute.sting.gatk.walkers.variantutils;
import org.broad.tribble.TribbleException;
import org.broad.tribble.dbsnp.DbSNPFeature;
import org.broadinstitute.sting.commandline.*;
import org.broadinstitute.sting.gatk.arguments.DbsnpArgumentCollection;
import org.broadinstitute.sting.gatk.arguments.StandardVariantContextInputArgumentCollection;
@ -41,7 +40,6 @@ import org.broadinstitute.sting.utils.variantcontext.VariantContext;
import java.io.File;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -168,14 +166,9 @@ public class ValidateVariants extends RodWalker<Integer, Integer> {
// get the RS IDs
Set<String> rsIDs = null;
if ( tracker.hasValues(dbsnp.dbsnp) ) {
List<VariantContext> dbsnpList = tracker.getValues(dbsnp.dbsnp, ref.getLocus());
rsIDs = new HashSet<String>();
for ( Object d : dbsnpList ) {
if (d instanceof DbSNPFeature )
rsIDs.add(((DbSNPFeature)d).getRsID());
else if (d instanceof VariantContext )
rsIDs.add(((VariantContext)d).getID());
}
for ( VariantContext rsID : tracker.getValues(dbsnp.dbsnp, ref.getLocus()) )
rsIDs.add(rsID.getID());
}
try {

View File

@ -252,7 +252,8 @@ public class ClippingOp {
if (start == 0 && stop == read.getReadLength() -1)
return new SAMRecord(read.getHeader());
CigarShift cigarShift = hardClipCigar(read.getCigar(), start, stop);
// If the read is unmapped there is no Cigar string and neither should we create a new cigar string
CigarShift cigarShift = (read.getReadUnmappedFlag()) ? new CigarShift(new Cigar(), 0, 0) : hardClipCigar(read.getCigar(), start, stop);
// the cigar may force a shift left or right (or both) in case we are left with insertions
// starting or ending the read after applying the hard clip on start/stop.

View File

@ -82,7 +82,7 @@ public class PileupElement {
// --------------------------------------------------------------------------
private Integer getReducedReadQualityTagValue() {
return (Integer)getRead().getAttribute(ReadUtils.REDUCED_READ_QUALITY_TAG);
return getRead().getIntegerAttribute(ReadUtils.REDUCED_READ_QUALITY_TAG);
}
public boolean isReducedRead() {

View File

@ -41,6 +41,10 @@ public class GATKSAMRecord extends SAMRecord {
// because some values can be null, we don't want to duplicate effort
private boolean retrievedReadGroup = false;
/** A private cache for the reduced read quality. Null indicates the value hasn't be fetched yet or isn't available */
private boolean lookedUpReducedReadQuality = false;
private Integer reducedReadQuality;
// These temporary attributes were added here to make life easier for
// certain algorithms by providing a way to label or attach arbitrary data to
// individual GATKSAMRecords.
@ -338,7 +342,17 @@ public class GATKSAMRecord extends SAMRecord {
public Object getAttribute(final String tag) { return mRecord.getAttribute(tag); }
public Integer getIntegerAttribute(final String tag) { return mRecord.getIntegerAttribute(tag); }
/**
 * Fetches an integer SAM tag value, caching the reduced-read-quality tag since
 * it is queried very frequently and attribute lookup on the underlying record
 * is comparatively expensive.
 *
 * @param tag the SAM tag name
 * @return the tag's Integer value, or null if the tag is absent
 */
public Integer getIntegerAttribute(final String tag) {
    // Use equals(), not ==: callers are not guaranteed to pass the identical
    // interned String instance as ReadUtils.REDUCED_READ_QUALITY_TAG, and a
    // reference comparison would silently bypass the cache in that case.
    if ( ReadUtils.REDUCED_READ_QUALITY_TAG.equals(tag) ) {
        if ( ! lookedUpReducedReadQuality ) {
            // Record the lookup before storing the value so even a null
            // result (tag absent) is cached and only fetched once.
            lookedUpReducedReadQuality = true;
            reducedReadQuality = mRecord.getIntegerAttribute(tag);
        }
        return reducedReadQuality;
    } else {
        return mRecord.getIntegerAttribute(tag);
    }
}
public Short getShortAttribute(final String tag) { return mRecord.getShortAttribute(tag); }

View File

@ -23,7 +23,7 @@ public class VariantsToVCFIntegrationTest extends WalkerTest {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-R " + b36KGReference +
" --variant:dbsnp " + GATKDataLocation + "Comparisons/Validated/dbSNP/dbsnp_129_b36.rod" +
" --variant:OldDbsnp " + GATKDataLocation + "Comparisons/Validated/dbSNP/dbsnp_129_b36.rod" +
" -T VariantsToVCF" +
" -L 1:1-30,000,000" +
" -o %s" +

View File

@ -31,8 +31,8 @@ public class CachingIndexedFastaSequenceFileUnitTest extends BaseTest {
private static final int STEP_SIZE = 1;
//private static final List<Integer> QUERY_SIZES = Arrays.asList(1);
private static final List<Integer> QUERY_SIZES = Arrays.asList(1, 10, 100, 1000);
private static final List<Integer> CACHE_SIZES = Arrays.asList(-1, 10, 1000);
private static final List<Integer> QUERY_SIZES = Arrays.asList(1, 10, 100);
private static final List<Integer> CACHE_SIZES = Arrays.asList(-1, 1000);
@DataProvider(name = "fastas")
public Object[][] createData1() {

View File

@ -11,7 +11,7 @@ import net.sf.samtools.SAMFileReader
import net.sf.samtools.SAMFileHeader.SortOrder
import org.broadinstitute.sting.queue.util.QScriptUtils
import org.broadinstitute.sting.queue.function.{CommandLineFunction, ListWriterFunction}
import org.broadinstitute.sting.queue.function.ListWriterFunction
class DataProcessingPipeline extends QScript {
qscript =>
@ -31,7 +31,7 @@ class DataProcessingPipeline extends QScript {
var reference: File = _
@Input(doc="dbsnp ROD to use (must be in VCF format)", fullName="dbsnp", shortName="D", required=true)
var dbSNP: File = _
var dbSNP: List[File] = List()
/****************************************************************************
* Optional Parameters
@ -43,7 +43,7 @@ class DataProcessingPipeline extends QScript {
//
@Input(doc="extra VCF files to use as reference indels for Indel Realignment", fullName="extra_indels", shortName="indels", required=false)
var indels: File = _
var indels: List[File] = List()
@Input(doc="The path to the binary of bwa (usually BAM files have already been mapped - but if you want to remap this is the option)", fullName="path_to_bwa", shortName="bwa", required=false)
var bwaPath: File = _
@ -159,7 +159,7 @@ class DataProcessingPipeline extends QScript {
for (rg <- readGroups) {
val intermediateInBam: File = if (index == readGroups.length) { inBam } else { swapExt(outBam, ".bam", index+1 + "-rg.bam") }
val intermediateOutBam: File = if (index > 1) {swapExt(outBam, ".bam", index + "-rg.bam") } else { outBam}
val readGroup = new ReadGroup(rg.getReadGroupId, rg.getPlatform, rg.getLibrary, rg.getPlatformUnit, rg.getSample, rg.getSequencingCenter, rg.getDescription)
val readGroup = new ReadGroup(rg.getReadGroupId, rg.getLibrary, rg.getPlatform, rg.getPlatformUnit, rg.getSample, rg.getSequencingCenter, rg.getDescription)
add(addReadGroup(intermediateInBam, intermediateOutBam, readGroup))
index = index - 1
}
@ -321,9 +321,9 @@ class DataProcessingPipeline extends QScript {
this.input_file = inBams
this.out = outIntervals
this.mismatchFraction = 0.0
this.known :+= qscript.dbSNP
this.known ++= qscript.dbSNP
if (indels != null)
this.known :+= qscript.indels
this.known ++= qscript.indels
this.scatterCount = nContigs
this.analysisName = queueLogDir + outIntervals + ".target"
this.jobName = queueLogDir + outIntervals + ".target"
@ -333,9 +333,9 @@ class DataProcessingPipeline extends QScript {
this.input_file = inBams
this.targetIntervals = tIntervals
this.out = outBam
this.known :+= qscript.dbSNP
this.known ++= qscript.dbSNP
if (qscript.indels != null)
this.known :+= qscript.indels
this.known ++= qscript.indels
this.consensusDeterminationModel = cleanModelEnum
this.compress = 0
this.scatterCount = nContigs
@ -344,7 +344,7 @@ class DataProcessingPipeline extends QScript {
}
case class cov (inBam: File, outRecalFile: File) extends CountCovariates with CommandLineGATKArgs {
this.knownSites :+= qscript.dbSNP
this.knownSites ++= qscript.dbSNP
this.covariate ++= List("ReadGroupCovariate", "QualityScoreCovariate", "CycleCovariate", "DinucCovariate")
this.input_file :+= inBam
this.recal_file = outRecalFile

View File

@ -1,6 +1,5 @@
package org.broadinstitute.sting.queue.qscripts
import org.broadinstitute.sting.commandline.Hidden
import org.broadinstitute.sting.queue.extensions.gatk._
import org.broadinstitute.sting.queue.QScript
import org.broadinstitute.sting.gatk.phonehome.GATKRunReport
@ -70,7 +69,8 @@ class MethodsDevelopmentCallingPipeline extends QScript {
val goldStandardClusterFile = new File(goldStandardName + ".clusters")
}
val hg19 = new File("/seq/references/Homo_sapiens_assembly19/v1/Homo_sapiens_assembly19.fasta")
val b37_decoy = new File("/humgen/1kg/reference/human_g1k_v37_decoy.fasta")
val hg19 = new File("/seq/references/Homo_sapiens_assembly19/v1/Homo_sapiens_assembly19.fasta")
val hg18 = new File("/seq/references/Homo_sapiens_assembly18/v0/Homo_sapiens_assembly18.fasta")
val b36 = new File("/humgen/1kg/reference/human_b36_both.fasta")
val b37 = new File("/humgen/1kg/reference/human_g1k_v37.fasta")
@ -124,6 +124,14 @@ class MethodsDevelopmentCallingPipeline extends QScript {
new File("/humgen/gsa-hpprojects/NA12878Collection/bams/CEUTrio.HiSeq.WGS.bwa.cleaned.recal.bam"),
new File("/humgen/gsa-hpprojects/dev/carneiro/trio/analysis/snps/CEUTrio.WEx.filtered.vcf"), // ** THIS GOLD STANDARD NEEDS TO BE CORRECTED **
"/humgen/1kg/processing/pipeline_test_bams/whole_genome_chunked.hg19.intervals", 2.3, 99.0, !lowPass, !exome, 3),
"WExTrioDecoy" -> new Target("CEUTrio.HiSeq.WEx.b37_decoy", b37_decoy, dbSNP_b37, hapmap_b37, indelMask_b37,
new File("/humgen/gsa-hpprojects/NA12878Collection/bams/CEUTrio.HiSeq.WEx.b37_decoy.list"),
new File("/humgen/gsa-hpprojects/dev/carneiro/trio/analysis/snps/CEUTrio.WEx.filtered.vcf"), // ** THIS GOLD STANDARD NEEDS TO BE CORRECTED **
"/seq/references/HybSelOligos/whole_exome_agilent_1.1_refseq_plus_3_boosters/whole_exome_agilent_1.1_refseq_plus_3_boosters.Homo_sapiens_assembly19.targets.interval_list", 3.3, 98.0, !lowPass, exome, 3),
"WGSTrioDecoy" -> new Target("CEUTrio.HiSeq.WGS.b37_decoy", b37_decoy, dbSNP_b37, hapmap_b37, indelMask_b37,
new File("/humgen/gsa-hpprojects/NA12878Collection/bams/CEUTrio.HiSeq.WGS.b37_decoy.list"),
new File("/humgen/gsa-hpprojects/dev/carneiro/trio/analysis/snps/CEUTrio.WEx.filtered.vcf"), // ** THIS GOLD STANDARD NEEDS TO BE CORRECTED **
"/humgen/1kg/processing/pipeline_test_bams/whole_genome_chunked.hg19.intervals", 2.3, 99.0, !lowPass, !exome, 3),
"FIN" -> new Target("FIN", b37, dbSNP_b37, hapmap_b37, indelMask_b37,
new File("/humgen/1kg/processing/pipeline_test_bams/FIN.79sample.Nov2010.chr20.bam"),
new File("/humgen/gsa-hpprojects/dev/data/AugChr20Calls_v4_3state/ALL.august.v4.chr20.filtered.vcf"), // ** THIS GOLD STANDARD NEEDS TO BE CORRECTED **

View File

@ -1,31 +1,14 @@
<ivysettings>
<properties file="${ivy.settings.dir}/ivysettings.properties"/>
<settings defaultResolver="libraries"/>
<settings defaultResolver="chain"/>
<resolvers>
<filesystem name="projects">
<artifact pattern="${repository.dir}/[organisation]/[artifact]-[revision].[ext]" />
<ivy pattern="${repository.dir}/[organisation]/[module]-[revision].xml" />
</filesystem>
<ibiblio name="libraries" m2compatible="true" />
<ibiblio name="libraries_with_inconsistent_poms" checkconsistency="false" m2compatible="true" />
<ibiblio name="reflections-repo" m2compatible="true" root="http://reflections.googlecode.com/svn/repo" />
<ibiblio name="java.net" m2compatible="false" root="http://download.java.net/maven/1/" pattern="[organisation]/jars/[artifact]-[revision].[ext]"/>
<ibiblio name="maven2-repository.dev.java.net" m2compatible="true" root="http://download.java.net/maven/2/" />
<chain name="chain">
<filesystem name="projects">
<artifact pattern="${repository.dir}/[organisation]/[artifact]-[revision].[ext]" />
<ivy pattern="${repository.dir}/[organisation]/[module]-[revision].xml" />
</filesystem>
<ibiblio name="reflections-repo" m2compatible="true" root="http://reflections.googlecode.com/svn/repo" />
<ibiblio name="maven" root="http://repo1.maven.org/maven2" m2compatible="true" />
</chain>
</resolvers>
<modules>
<module organisation="edu.mit.broad" resolver="projects" />
<module organisation="net.sf" module="functionalj" resolver="projects" />
<module organisation="net.sf" module="samtools" resolver="projects" />
<module organisation="org.reflections" module="reflections" resolver="reflections-repo" />
<module organisation="org.broad" module="tribble" resolver="projects" />
<module organisation="gov.nist" module="Jama" resolver="projects" />
<!-- If colt fixes the version in the pom for 1.2.0 then this line can be removed. -->
<module organisation="colt" module="colt" resolver="libraries_with_inconsistent_poms" />
<module organisation="javax.mail" resolver="java.net" />
<module organisation="javax.activation" resolver="java.net" />
<module organisation="net.java.dev.jna" resolver="maven2-repository.dev.java.net" />
<module organisation="com.google.code.caliper" resolver="projects" />
<module organisation="net.sf.gridscheduler" resolver="projects" />
<module organisation="com.google.code.cofoja" resolver="projects" />
</modules>
</ivysettings>

View File

@ -1,3 +1,3 @@
<ivy-module version="1.0">
<info organisation="org.broad" module="tribble" revision="21" status="integration" />
<info organisation="org.broad" module="tribble" revision="24" status="integration" />
</ivy-module>