diff --git a/.gitignore b/.gitignore
index 9a20b68ca..65f111587 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,6 @@
/*.bam
/*.bai
/*.bed
-*.idx
*~
/*.vcf
/*.txt
diff --git a/build.xml b/build.xml
index bb02c1ff1..12ebfa18f 100644
--- a/build.xml
+++ b/build.xml
@@ -91,9 +91,8 @@
This tool calculates the u-based z-approximation from the Mann-Whitney Rank Sum Test for base qualities(ref bases vs. bases of the alternate allele). The base quality rank sum test can not be calculated for sites without a mixture of reads showing both the reference and alternate alleles. This annotation tool outputs the following:
+ *
+ * Caveat
+ *
+ *
This tool calculates the u-based z-approximation from the Mann-Whitney Rank Sum Test for reads with clipped bases (reads with ref bases vs. those with the alternate allele).
+ * + *The clipping rank sum test can not be calculated for sites without a mixture of reads showing both the reference and alternate alleles.
+ * + * @author rpoplin + * @since 6/28/12 */ public class ClippingRankSumTest extends RankSumTest { @@ -83,12 +85,12 @@ public class ClippingRankSumTest extends RankSumTest { for (Map.EntryWhile the sample-level (FORMAT) DP field describes the total depth of reads that passed the caller's * internal quality control metrics (like MAPQ > 17, for example), the INFO field DP represents the unfiltered depth * over all samples. Note though that the DP is affected by downsampling (-dcov), so the max value one can obtain for * N samples with -dcov D is N * D + *
*/ public class Coverage extends InfoFieldAnnotation implements StandardAnnotation, ActiveRegionBasedAnnotation { diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/DepthPerAlleleBySample.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/DepthPerAlleleBySample.java index 5acea12f6..1cf91f181 100644 --- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/DepthPerAlleleBySample.java +++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/DepthPerAlleleBySample.java @@ -52,6 +52,7 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.AnnotatorCompatible; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.GenotypeAnnotation; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation; +import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele; import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap; import org.broadinstitute.variant.vcf.VCFConstants; import org.broadinstitute.variant.vcf.VCFFormatHeaderLine; @@ -72,11 +73,11 @@ import java.util.Map; /** - * The depth of coverage of each VCF allele in this sample. + * The depth of coverage of each allele per sample * - * The AD and DP are complementary fields that are two important ways of thinking about the depth of the data for this + *The AD and DP are complementary fields that are two important ways of thinking about the depth of the data for this * sample at this site. 
While the sample-level (FORMAT) DP field describes the total depth of reads that passed the - * Unified Genotyper's internal quality control metrics (like MAPQ > 17, for example), the AD values (one for each of + * caller's internal quality control metrics (like MAPQ > 17, for example), the AD values (one for each of * REF and ALT fields) is the unfiltered count of all reads that carried with them the * REF and ALT alleles. The reason for this distinction is that the DP is in some sense reflective of the * power I have to determine the genotype of the sample at this site, while the AD tells me how many times @@ -86,10 +87,12 @@ import java.util.Map; * normally be excluded from the statistical calculations going into GQ and QUAL. Please note, however, that * the AD isn't necessarily calculated exactly for indels. Only reads which are statistically favoring one allele over the other are counted. * Because of this fact, the sum of AD may be different than the individual sample depth, especially when there are - * many non-informatice reads. - * Because the AD includes reads and bases that were filtered by the Unified Genotyper and in case of indels is based on a statistical computation, + * many non-informative reads.
+ * + *Because the AD includes reads and bases that were filtered by the caller and in case of indels is based on a statistical computation, * one should not base assumptions about the underlying genotype based on it; - * instead, the genotype likelihoods (PLs) are what determine the genotype calls. + * instead, the genotype likelihoods (PLs) are what determine the genotype calls.
+ * */ public class DepthPerAlleleBySample extends GenotypeAnnotation implements StandardAnnotation { @@ -139,12 +142,12 @@ public class DepthPerAlleleBySample extends GenotypeAnnotation implements Standa } for (Map.EntryPhred-scaled p-value using Fisher's Exact Test to detect strand bias (the variation + * being seen on only the forward or only the reverse strand) in the reads. More bias is + * indicative of false positive calls. + *
+ * + *The Fisher Strand test may not be calculated for certain complex indel cases or for multi-allelic sites.
*/ public class FisherStrand extends InfoFieldAnnotation implements StandardAnnotation, ActiveRegionBasedAnnotation { + private final static Logger logger = Logger.getLogger(FisherStrand.class); + private static final String FS = "FS"; private static final double MIN_PVALUE = 1E-320; private static final int MIN_QUAL_FOR_FILTERED_TEST = 17; @@ -95,6 +104,8 @@ public class FisherStrand extends InfoFieldAnnotation implements StandardAnnotat else if (stratifiedPerReadAlleleLikelihoodMap != null) { // either SNP with no alignment context, or indels: per-read likelihood map needed final int[][] table = getContingencyTable(stratifiedPerReadAlleleLikelihoodMap, vc); +// logger.info("VC " + vc); +// printTable(table, 0.0); return pValueForBestTable(table, null); } else @@ -131,9 +142,6 @@ public class FisherStrand extends InfoFieldAnnotation implements StandardAnnotat private MapThe GC content is the number of GC bases relative to the total number of bases (# GC bases / # all bases) around this site on the reference.
+ * + *The window size used to calculate the GC content around the site is set by the tool used for annotation + * (currently UnifiedGenotyper, HaplotypeCaller or VariantAnnotator). See the Technical Document for each tool + * to find out what window size they use.
*/ -@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_QC, extraDocs = {CommandLineGATK.class} ) -public class GCContent extends InfoFieldAnnotation implements ExperimentalAnnotation { +public class GCContent extends InfoFieldAnnotation { public MapThis annotation calculates the Phred-scaled P value of genotype-based (using GT field) test for Hardy-Weinberg test for disequilibrium.
+ * + *Right now we just ignore genotypes that are not confident, but this throws off our HW ratios. + * More analysis is needed to determine the right thing to do when the genotyper cannot decide whether a given sample is het or hom var.
*/ -public class HardyWeinberg extends InfoFieldAnnotation implements WorkInProgressAnnotation { +public class HardyWeinberg extends InfoFieldAnnotation implements ExperimentalAnnotation { private static final int MIN_SAMPLES = 10; private static final int MIN_GENOTYPE_QUALITY = 10; diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/HomopolymerRun.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/HomopolymerRun.java index c25cb6820..4039241ac 100644 --- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/HomopolymerRun.java +++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/HomopolymerRun.java @@ -50,6 +50,7 @@ import org.broadinstitute.sting.gatk.contexts.AlignmentContext; import org.broadinstitute.sting.gatk.contexts.ReferenceContext; import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.AnnotatorCompatible; +import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.ExperimentalAnnotation; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.InfoFieldAnnotation; import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap; import org.broadinstitute.sting.utils.GenomeLoc; @@ -63,9 +64,16 @@ import java.util.List; import java.util.Map; /** - * Largest contiguous homopolymer run of the variant allele in either direction on the reference. Computed only for bi-allelic sites. + * Largest contiguous homopolymer run of the variant allele + * + *Calculates the length of the largest contiguous homopolymer run of the variant allele in either direction on the reference.
+ * + *This can only be computed for bi-allelic sites.
+ *This needs to be computed in a more accurate manner. We currently look only at direct runs of the alternate allele adjacent to this position.
*/ -public class HomopolymerRun extends InfoFieldAnnotation { +public class HomopolymerRun extends InfoFieldAnnotation implements ExperimentalAnnotation { private boolean ANNOTATE_INDELS = true; diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MVLikelihoodRatio.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MVLikelihoodRatio.java index 19f32bae0..ad974a083 100644 --- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MVLikelihoodRatio.java +++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MVLikelihoodRatio.java @@ -65,13 +65,20 @@ import org.broadinstitute.variant.variantcontext.VariantContext; import java.util.*; /** - * Given a variant context, uses the genotype likelihoods to assess the likelihood of the site being a mendelian violation - * versus the likelihood of the site transmitting according to mendelian rules. This assumes that the organism is - * diploid. When multiple trios are present, the annotation is simply the maximum of the likelihood ratios, rather than - * the strict 1-Prod(1-p_i) calculation, as this can scale poorly for uncertain sites and many trios. + * Likelihood of being a Mendelian Violation + * + *Given a variant context, this tool uses the genotype likelihoods to assess the likelihood of the site being a mendelian violation + * versus the likelihood of the site transmitting according to mendelian rules.
+ * + *Note that this annotation requires a valid ped file.
+ * + *This tool assumes that the organism is diploid. When multiple trios are present, the annotation is simply the maximum + * of the likelihood ratios, rather than the strict 1-Prod(1-p_i) calculation, as this can scale poorly for uncertain + * sites and many trios.
*/ -public class MVLikelihoodRatio extends InfoFieldAnnotation implements ExperimentalAnnotation, RodRequiringAnnotation { +public class MVLikelihoodRatio extends InfoFieldAnnotation implements RodRequiringAnnotation { private MendelianViolation mendelianViolation = null; public static final String MVLR_KEY = "MVLR"; diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MappingQualityRankSumTest.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MappingQualityRankSumTest.java index 8c401eecd..3873138a2 100644 --- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MappingQualityRankSumTest.java +++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/MappingQualityRankSumTest.java @@ -47,6 +47,7 @@ package org.broadinstitute.sting.gatk.walkers.annotator; import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation; +import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele; import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap; import org.broadinstitute.variant.vcf.VCFHeaderLineType; import org.broadinstitute.variant.vcf.VCFInfoHeaderLine; @@ -59,8 +60,12 @@ import java.util.*; /** - * The u-based z-approximation from the Mann-Whitney Rank Sum Test for mapping qualities (reads with ref bases vs. those with the alternate allele) - * Note that the mapping quality rank sum test can not be calculated for sites without a mixture of reads showing both the reference and alternate alleles. + * U-based z-approximation from the Mann-Whitney Rank Sum Test for mapping qualities + * + *This tool calculates the u-based z-approximation from the Mann-Whitney Rank Sum Test for mapping qualities (reads with ref bases vs. those with the alternate allele).
+ * + *The mapping quality rank sum test can not be calculated for sites without a mixture of reads showing both the reference and alternate alleles.
*/ public class MappingQualityRankSumTest extends RankSumTest implements StandardAnnotation { @@ -88,13 +93,13 @@ public class MappingQualityRankSumTest extends RankSumTest implements StandardAn return; } for (Map.EntryThis tool calculates the u-based z-approximation from the Mann-Whitney Rank Sum Test for the distance from the end of the read for reads with the alternate allele. If the alternate allele is only seen near the ends of reads, this is indicative of error.
+ * + *The read position rank sum test can not be calculated for sites without a mixture of reads showing both the reference and alternate alleles.
*/ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotation { @@ -103,8 +108,8 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio } for (Map.EntryNote that this annotation is currently not compatible with HaplotypeCaller.
*/ public class SpanningDeletions extends InfoFieldAnnotation implements StandardAnnotation { @@ -86,10 +89,12 @@ public class SpanningDeletions extends InfoFieldAnnotation implements StandardAn int deletions = 0; int depth = 0; for ( Map.EntryThis tool outputs the number of times the tandem repeat unit is repeated, for each allele (including reference).
+ * + *This annotation is currently not compatible with HaplotypeCaller.
+ */ public class TandemRepeatAnnotator extends InfoFieldAnnotation implements StandardAnnotation { private static final String STR_PRESENT = "STR"; private static final String REPEAT_UNIT_KEY = "RU"; diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/TransmissionDisequilibriumTest.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/TransmissionDisequilibriumTest.java index b3f5728a2..f8efd7c3f 100644 --- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/TransmissionDisequilibriumTest.java +++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/annotator/TransmissionDisequilibriumTest.java @@ -65,12 +65,21 @@ import org.broadinstitute.variant.variantcontext.VariantContext; import java.util.*; /** - * Created by IntelliJ IDEA. - * User: rpoplin, lfran, ebanks - * Date: 11/14/11 + * Wittkowski transmission disequilibrium test + * + *Test statistic from Wittkowski transmission disequilibrium test. + * The calculation is based on the following derivation in http://en.wikipedia.org/wiki/Transmission_disequilibrium_test#A_modified_version_of_the_TDT
+ * + *Note that this annotation requires a valid ped file.
+ * + *This annotation can only be used with VariantAnnotator (not with UnifiedGenotyper or HaplotypeCaller).
+ * + * @author rpoplin, lfran, ebanks + * @since 11/14/11 */ -public class TransmissionDisequilibriumTest extends InfoFieldAnnotation implements ExperimentalAnnotation, RodRequiringAnnotation { +public class TransmissionDisequilibriumTest extends InfoFieldAnnotation implements RodRequiringAnnotation { private SetThis tool assigns a roughly correct category of the variant type (SNP, MNP, insertion, deletion, etc.). + * It also specifies whether the variant is multiallelic (>2 alleles).
*/ -public class VariantType extends InfoFieldAnnotation implements ExperimentalAnnotation { +public class VariantType extends InfoFieldAnnotation { public Map* - *
* The input read data whose base quality scores need to be assessed. *
* A database of known polymorphic sites to skip over. *
* - ** A GATK Report file with many tables: *
* java -Xmx4g -jar GenomeAnalysisTK.jar \
* -T BaseRecalibrator \
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationArgumentCollection.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationArgumentCollection.java
index 5ab296a5f..0a4899f1c 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationArgumentCollection.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationArgumentCollection.java
@@ -61,7 +61,7 @@ import java.util.List;
* User: rpoplin
* Date: Nov 27, 2009
*
- * A collection of the arguments that are common to both CovariateCounterWalker and TableRecalibrationWalker.
+ * A collection of the arguments that are used for BQSR. Used to be common to both CovariateCounterWalker and TableRecalibrationWalker.
* This set of arguments will also be passed to the constructor of every Covariate when it is instantiated.
*/
@@ -91,7 +91,7 @@ public class RecalibrationArgumentCollection {
* If not provided, then no plots will be generated (useful for queue scatter/gathering).
* However, we *highly* recommend that users generate these plots whenever possible for QC checking.
*/
- @Output(fullName = "plot_pdf_file", shortName = "plots", doc = "The output recalibration pdf file to create", required = false)
+ @Output(fullName = "plot_pdf_file", shortName = "plots", doc = "The output recalibration pdf file to create", required = false, defaultToStdout = false)
public File RECAL_PDF_FILE = null;
/**
@@ -131,14 +131,14 @@ public class RecalibrationArgumentCollection {
public boolean RUN_WITHOUT_DBSNP = false;
/**
- * CountCovariates and TableRecalibration accept a --solid_recal_mode flag which governs how the recalibrator handles the
+ * BaseRecalibrator accepts a --solid_recal_mode flag which governs how the recalibrator handles the
* reads which have had the reference inserted because of color space inconsistencies.
*/
@Argument(fullName = "solid_recal_mode", shortName = "sMode", required = false, doc = "How should we recalibrate solid bases in which the reference was inserted? Options = DO_NOTHING, SET_Q_ZERO, SET_Q_ZERO_BASE_N, or REMOVE_REF_BIAS")
public RecalUtils.SOLID_RECAL_MODE SOLID_RECAL_MODE = RecalUtils.SOLID_RECAL_MODE.SET_Q_ZERO;
/**
- * CountCovariates and TableRecalibration accept a --solid_nocall_strategy flag which governs how the recalibrator handles
+ * BaseRecalibrator accepts a --solid_nocall_strategy flag which governs how the recalibrator handles
* no calls in the color space tag. Unfortunately because of the reference inserted bases mentioned above, reads with no calls in
* their color space tag can not be recalibrated.
*/
@@ -146,38 +146,38 @@ public class RecalibrationArgumentCollection {
public RecalUtils.SOLID_NOCALL_STRATEGY SOLID_NOCALL_STRATEGY = RecalUtils.SOLID_NOCALL_STRATEGY.THROW_EXCEPTION;
/**
- * The context covariate will use a context of this size to calculate it's covariate value for base mismatches
+ * The context covariate will use a context of this size to calculate its covariate value for base mismatches. Must be between 1 and 13 (inclusive). Note that higher values will increase runtime and required java heap size.
*/
- @Argument(fullName = "mismatches_context_size", shortName = "mcs", doc = "size of the k-mer context to be used for base mismatches", required = false)
+ @Argument(fullName = "mismatches_context_size", shortName = "mcs", doc = "Size of the k-mer context to be used for base mismatches", required = false)
public int MISMATCHES_CONTEXT_SIZE = 2;
/**
- * The context covariate will use a context of this size to calculate it's covariate value for base insertions and deletions
+ * The context covariate will use a context of this size to calculate its covariate value for base insertions and deletions. Must be between 1 and 13 (inclusive). Note that higher values will increase runtime and required java heap size.
*/
- @Argument(fullName = "indels_context_size", shortName = "ics", doc = "size of the k-mer context to be used for base insertions and deletions", required = false)
+ @Argument(fullName = "indels_context_size", shortName = "ics", doc = "Size of the k-mer context to be used for base insertions and deletions", required = false)
public int INDELS_CONTEXT_SIZE = 3;
/**
* The cycle covariate will generate an error if it encounters a cycle greater than this value.
* This argument is ignored if the Cycle covariate is not used.
*/
- @Argument(fullName = "maximum_cycle_value", shortName = "maxCycle", doc = "the maximum cycle value permitted for the Cycle covariate", required = false)
+ @Argument(fullName = "maximum_cycle_value", shortName = "maxCycle", doc = "The maximum cycle value permitted for the Cycle covariate", required = false)
public int MAXIMUM_CYCLE_VALUE = 500;
/**
- * A default base qualities to use as a prior (reported quality) in the mismatch covariate model. This value will replace all base qualities in the read for this default value. Negative value turns it off (default is off)
+ * A default base quality to use as a prior (reported quality) in the mismatch covariate model. This value will replace all base qualities in the read with this default value. Negative value turns it off. [default is off]
*/
@Argument(fullName = "mismatches_default_quality", shortName = "mdq", doc = "default quality for the base mismatches covariate", required = false)
public byte MISMATCHES_DEFAULT_QUALITY = -1;
/**
- * A default base qualities to use as a prior (reported quality) in the insertion covariate model. This parameter is used for all reads without insertion quality scores for each base. (default is on)
+ * A default base quality to use as a prior (reported quality) in the insertion covariate model. This parameter is used for all reads without insertion quality scores for each base. [default is on]
*/
@Argument(fullName = "insertions_default_quality", shortName = "idq", doc = "default quality for the base insertions covariate", required = false)
public byte INSERTIONS_DEFAULT_QUALITY = 45;
/**
- * A default base qualities to use as a prior (reported quality) in the mismatch covariate model. This value will replace all base qualities in the read for this default value. Negative value turns it off (default is off)
+ * A default base quality to use as a prior (reported quality) in the deletion covariate model. This value will replace all base qualities in the read with this default value. Negative value turns it off. [default is on]
*/
@Argument(fullName = "deletions_default_quality", shortName = "ddq", doc = "default quality for the base deletions covariate", required = false)
public byte DELETIONS_DEFAULT_QUALITY = 45;
@@ -220,7 +220,7 @@ public class RecalibrationArgumentCollection {
public String FORCE_PLATFORM = null;
@Hidden
- @Output(fullName = "recal_table_update_log", shortName = "recal_table_update_log", required = false, doc = "If provided, log all updates to the recalibration tables to the given file. For debugging/testing purposes only")
+ @Output(fullName = "recal_table_update_log", shortName = "recal_table_update_log", required = false, doc = "If provided, log all updates to the recalibration tables to the given file. For debugging/testing purposes only", defaultToStdout = false)
public PrintStream RECAL_TABLE_UPDATE_LOG = null;
/**
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationEngine.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationEngine.java
index 5e6e2a8d9..9f33234cf 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationEngine.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationEngine.java
@@ -178,7 +178,7 @@ public class RecalibrationEngine {
final NestedIntegerArray byQualTable = finalRecalibrationTables.getQualityScoreTable();
// iterate over all values in the qual table
- for ( NestedIntegerArray.Leaf leaf : byQualTable.getAllLeaves() ) {
+ for ( final NestedIntegerArray.Leaf leaf : byQualTable.getAllLeaves() ) {
final int rgKey = leaf.keys[0];
final int eventIndex = leaf.keys[2];
final RecalDatum rgDatum = byReadGroupTable.get(rgKey, eventIndex);
@@ -206,7 +206,9 @@ public class RecalibrationEngine {
*/
@Requires("! finalized")
private RecalibrationTables mergeThreadLocalRecalibrationTables() {
- if ( recalibrationTablesList.isEmpty() ) throw new IllegalStateException("recalibration tables list is empty");
+ if ( recalibrationTablesList.isEmpty() ) {
+ recalibrationTablesList.add( new RecalibrationTables(covariates, numReadGroups, maybeLogStream) );
+ }
RecalibrationTables merged = null;
for ( final RecalibrationTables table : recalibrationTablesList ) {
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationPerformance.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationPerformance.java
index fb11f6249..271617059 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationPerformance.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/bqsr/RecalibrationPerformance.java
@@ -47,6 +47,7 @@
package org.broadinstitute.sting.gatk.walkers.bqsr;
import org.broadinstitute.sting.commandline.*;
+import org.broadinstitute.sting.gatk.CommandLineGATK;
import org.broadinstitute.sting.gatk.contexts.AlignmentContext;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.filters.*;
@@ -55,18 +56,27 @@ import org.broadinstitute.sting.gatk.report.GATKReport;
import org.broadinstitute.sting.gatk.report.GATKReportTable;
import org.broadinstitute.sting.gatk.walkers.*;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
+import org.broadinstitute.sting.utils.help.DocumentedGATKFeature;
+import org.broadinstitute.sting.utils.help.HelpConstants;
import org.broadinstitute.sting.utils.recalibration.*;
import java.io.*;
/**
+ * Evaluate the performance of the base recalibration process
+ *
+ * This tool aims to evaluate the results of the Base Quality Score Recalibration (BQSR) process.
+ *
+ * Caveat
+ * This tool is currently experimental. We do not provide documentation or support for its operation.
+ *
*/
-
+@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_QC, extraDocs = {CommandLineGATK.class} )
@ReadFilters({MappingQualityZeroFilter.class, MappingQualityUnavailableFilter.class, UnmappedReadFilter.class, NotPrimaryAlignmentFilter.class, DuplicateReadFilter.class, FailsVendorQualityCheckFilter.class})
@PartitionBy(PartitionType.READ)
public class RecalibrationPerformance extends RodWalker implements NanoSchedulable {
- @Output(doc="Write output to this file", required = true)
+ @Output
public PrintStream out;
@Input(fullName="recal", shortName="recal", required=false, doc="The input covariates table file")
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseAndQualsCounts.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseAndQualsCounts.java
index 7f8b0dded..28a48c212 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseAndQualsCounts.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseAndQualsCounts.java
@@ -53,39 +53,155 @@ package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
* @since 6/15/12
*/
public class BaseAndQualsCounts extends BaseCounts {
- private final long[] sumInsertionQuals;
- private final long[] sumDeletionQuals;
- public BaseAndQualsCounts() {
- super();
- this.sumInsertionQuals = new long[BaseIndex.values().length];
- this.sumDeletionQuals = new long[BaseIndex.values().length];
- // Java primitive arrays comes zero-filled, so no need to do it explicitly.
+ private long sumInsertionQual_A = 0;
+ private long sumDeletionQual_A = 0;
+ private long sumInsertionQual_C = 0;
+ private long sumDeletionQual_C = 0;
+ private long sumInsertionQual_G = 0;
+ private long sumDeletionQual_G = 0;
+ private long sumInsertionQual_T = 0;
+ private long sumDeletionQual_T = 0;
+ private long sumInsertionQual_D = 0;
+ private long sumDeletionQual_D = 0;
+ private long sumInsertionQual_I = 0;
+ private long sumDeletionQual_I = 0;
+ private long sumInsertionQual_N = 0;
+ private long sumDeletionQual_N = 0;
+
+ /*
+ * Increments the count
+ *
+ * @param base the base
+ * @param baseQual the base quality
+ * @param insQual the insertion quality
+ * @param delQual the deletion quality
+ * @param baseMappingQual the mapping quality
+ * @param isLowQualBase true if the base is low quality
+ */
+ public void incr(final byte base, final byte baseQual, final byte insQual, final byte delQual, final int baseMappingQual, final boolean isLowQualBase) {
+ incr(base, baseQual, insQual, delQual, baseMappingQual, isLowQualBase, false);
}
- public void incr(final byte base, final byte baseQual, final byte insQual, final byte delQual) {
+ /*
+ * Increments the count
+ *
+ * @param base the base
+ * @param baseQual the base quality
+ * @param insQual the insertion quality
+ * @param delQual the deletion quality
+ * @param baseMappingQual the mapping quality
+ * @param isLowQualBase true if the base is low quality
+ * @param isSoftClip true if is soft-clipped
+ */
+ public void incr(final byte base, final byte baseQual, final byte insQual, final byte delQual, final int baseMappingQual, final boolean isLowQualBase, final boolean isSoftClip) {
+ // if we already have high quality bases, ignore low quality ones
+ if ( isLowQualBase && !isLowQuality() )
+ return;
+
+ // if this is a high quality base then remove any low quality bases and start from scratch
+ if ( !isLowQualBase && isLowQuality() ) {
+ if ( totalCount() > 0 )
+ clear();
+ setLowQuality(false);
+ }
+
final BaseIndex i = BaseIndex.byteToBase(base);
- super.incr(i, baseQual);
- sumInsertionQuals[i.index] += insQual;
- sumDeletionQuals[i.index] += delQual;
+ super.incr(i, baseQual, baseMappingQual, isSoftClip);
+ switch (i) {
+ case A: sumInsertionQual_A += insQual; sumDeletionQual_A += delQual; break;
+ case C: sumInsertionQual_C += insQual; sumDeletionQual_C += delQual; break;
+ case G: sumInsertionQual_G += insQual; sumDeletionQual_G += delQual; break;
+ case T: sumInsertionQual_T += insQual; sumDeletionQual_T += delQual; break;
+ case D: sumInsertionQual_D += insQual; sumDeletionQual_D += delQual; break;
+ case I: sumInsertionQual_I += insQual; sumDeletionQual_I += delQual; break;
+ case N: sumInsertionQual_N += insQual; sumDeletionQual_N += delQual; break;
+ }
}
- public void decr(final byte base, final byte baseQual, final byte insQual, final byte delQual) {
+ /*
+ * Decrements the count
+ *
+ * @param base the base
+ * @param baseQual the base quality
+ * @param insQual the insertion quality
+ * @param delQual the deletion quality
+ * @param baseMappingQual the mapping quality
+ * @param isLowQualBase true if the base is low quality
+ */
+ public void decr(final byte base, final byte baseQual, final byte insQual, final byte delQual, final int baseMappingQual, final boolean isLowQualBase) {
+ decr(base, baseQual, insQual, delQual, baseMappingQual, isLowQualBase, false);
+ }
+
+ /*
+ * Decrements the count
+ *
+ * @param base the base
+ * @param baseQual the base quality
+ * @param insQual the insertion quality
+ * @param delQual the deletion quality
+ * @param baseMappingQual the mapping quality
+ * @param isLowQualBase true if the base is low quality
+ * @param isSoftClip true if is soft-clipped
+ */
+ public void decr(final byte base, final byte baseQual, final byte insQual, final byte delQual, final int baseMappingQual, final boolean isLowQualBase, final boolean isSoftClip) {
+ // if this is not the right type of base, ignore it
+ if ( isLowQualBase != isLowQuality() )
+ return;
+
final BaseIndex i = BaseIndex.byteToBase(base);
- super.decr(i, baseQual);
- sumInsertionQuals[i.index] -= insQual;
- sumDeletionQuals[i.index] -= delQual;
+ super.decr(i, baseQual, baseMappingQual, isSoftClip);
+ switch (i) {
+ case A: sumInsertionQual_A -= insQual; sumDeletionQual_A -= delQual; break;
+ case C: sumInsertionQual_C -= insQual; sumDeletionQual_C -= delQual; break;
+ case G: sumInsertionQual_G -= insQual; sumDeletionQual_G -= delQual; break;
+ case T: sumInsertionQual_T -= insQual; sumDeletionQual_T -= delQual; break;
+ case D: sumInsertionQual_D -= insQual; sumDeletionQual_D -= delQual; break;
+ case I: sumInsertionQual_I -= insQual; sumDeletionQual_I -= delQual; break;
+ case N: sumInsertionQual_N -= insQual; sumDeletionQual_N -= delQual; break;
+ }
}
public byte averageInsertionQualsOfBase(final BaseIndex base) {
- return getGenericAverageQualOfBase(base, sumInsertionQuals);
+ return (byte) (getInsertionQual(base) / countOfBase(base));
}
public byte averageDeletionQualsOfBase(final BaseIndex base) {
- return getGenericAverageQualOfBase(base, sumDeletionQuals);
+ return (byte) (getDeletionQual(base) / countOfBase(base));
}
- private byte getGenericAverageQualOfBase(final BaseIndex base, final long[] sumQuals) {
- return (byte) (sumQuals[base.index] / countOfBase(base));
+ private long getInsertionQual(final BaseIndex base) {
+ switch (base) {
+ case A: return sumInsertionQual_A;
+ case C: return sumInsertionQual_C;
+ case G: return sumInsertionQual_G;
+ case T: return sumInsertionQual_T;
+ case D: return sumInsertionQual_D;
+ case I: return sumInsertionQual_I;
+ case N: return sumInsertionQual_N;
+ default: throw new IllegalArgumentException(base.name());
+ }
+ }
+
+ private long getDeletionQual(final BaseIndex base) {
+ switch (base) {
+ case A: return sumDeletionQual_A;
+ case C: return sumDeletionQual_C;
+ case G: return sumDeletionQual_G;
+ case T: return sumDeletionQual_T;
+ case D: return sumDeletionQual_D;
+ case I: return sumDeletionQual_I;
+ case N: return sumDeletionQual_N;
+ default: throw new IllegalArgumentException(base.name());
+ }
+ }
+
+ /**
+ * Clears out all stored data in this object
+ */
+ public void clear() {
+ super.clear();
+ sumInsertionQual_A = sumInsertionQual_C = sumInsertionQual_G = sumInsertionQual_T = sumInsertionQual_D = sumInsertionQual_I = sumInsertionQual_N = 0;
+ sumDeletionQual_A = sumDeletionQual_C = sumDeletionQual_G = sumDeletionQual_T = sumDeletionQual_D = sumDeletionQual_I = sumDeletionQual_N = 0;
}
}
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseCounts.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseCounts.java
index 399cbd2a5..e1329db3b 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseCounts.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseCounts.java
@@ -48,6 +48,8 @@ package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
+import it.unimi.dsi.fastutil.ints.IntArrayList;
+import org.broadinstitute.sting.utils.MathUtils;
/**
@@ -62,70 +64,118 @@ import com.google.java.contract.Requires;
public final static BaseIndex MAX_BASE_INDEX_WITH_NO_COUNTS = BaseIndex.N;
public final static byte MAX_BASE_WITH_NO_COUNTS = MAX_BASE_INDEX_WITH_NO_COUNTS.getByte();
- private final int[] counts; // keeps track of the base counts
- private final long[] sumQuals; // keeps track of the quals of each base
- private int totalCount = 0; // keeps track of total count since this is requested so often
- public BaseCounts() {
- counts = new int[BaseIndex.values().length];
- sumQuals = new long[BaseIndex.values().length];
- // Java primitive arrays comes zero-filled, so no need to do it explicitly.
- }
+ private int count_A = 0; // keeps track of the base counts
+ private int sumQual_A = 0; // keeps track of the quals of each base
+ private int count_C = 0;
+ private int sumQual_C = 0;
+ private int count_G = 0;
+ private int sumQual_G = 0;
+ private int count_T = 0;
+ private int sumQual_T = 0;
+ private int count_D = 0;
+ private int sumQual_D = 0;
+ private int count_I = 0;
+ private int sumQual_I = 0;
+ private int count_N = 0;
+ private int sumQual_N = 0;
+ private int totalCount = 0; // keeps track of total count since this is requested so often
+ private int nSoftClippedBases = 0;
+ private final IntArrayList mappingQualities = new IntArrayList(); // keeps the mapping quality of each read that contributed to this
+ private boolean isLowQuality = true; // this object represents low quality bases unless we are told otherwise
+
public static BaseCounts createWithCounts(int[] countsACGT) {
BaseCounts baseCounts = new BaseCounts();
- baseCounts.counts[BaseIndex.A.index] = countsACGT[0];
- baseCounts.counts[BaseIndex.C.index] = countsACGT[1];
- baseCounts.counts[BaseIndex.G.index] = countsACGT[2];
- baseCounts.counts[BaseIndex.T.index] = countsACGT[3];
+ baseCounts.count_A = countsACGT[0];
+ baseCounts.count_C = countsACGT[1];
+ baseCounts.count_G = countsACGT[2];
+ baseCounts.count_T = countsACGT[3];
baseCounts.totalCount = countsACGT[0] + countsACGT[1] + countsACGT[2] + countsACGT[3];
return baseCounts;
}
@Requires("other != null")
public void add(final BaseCounts other) {
- for (final BaseIndex i : BaseIndex.values()) {
- final int otherCount = other.counts[i.index];
- counts[i.index] += otherCount;
- totalCount += otherCount;
- }
+ this.count_A += other.count_A;
+ this.count_C += other.count_C;
+ this.count_G += other.count_G;
+ this.count_T += other.count_T;
+ this.count_D += other.count_D;
+ this.count_I += other.count_I;
+ this.count_N += other.count_N;
+ this.totalCount += other.totalCount;
+ this.nSoftClippedBases = other.nSoftClippedBases;
+ this.mappingQualities.addAll(other.mappingQualities);
}
@Requires("other != null")
public void sub(final BaseCounts other) {
- for (final BaseIndex i : BaseIndex.values()) {
- final int otherCount = other.counts[i.index];
- counts[i.index] -= otherCount;
- totalCount -= otherCount;
- }
+ this.count_A -= other.count_A;
+ this.count_C -= other.count_C;
+ this.count_G -= other.count_G;
+ this.count_T -= other.count_T;
+ this.count_D -= other.count_D;
+ this.count_I -= other.count_I;
+ this.count_N -= other.count_N;
+ this.totalCount -= other.totalCount;
+ this.nSoftClippedBases -= other.nSoftClippedBases;
+ this.mappingQualities.removeAll(other.mappingQualities);
}
@Ensures("totalCount() == old(totalCount()) || totalCount() == old(totalCount()) + 1")
public void incr(final byte base) {
- final BaseIndex i = BaseIndex.byteToBase(base);
- counts[i.index]++;
- totalCount++;
+ add(BaseIndex.byteToBase(base), 1);
}
@Ensures("totalCount() == old(totalCount()) || totalCount() == old(totalCount()) + 1")
- public void incr(final BaseIndex base, final byte qual) {
- counts[base.index]++;
- totalCount++;
- sumQuals[base.index] += qual;
+ public void incr(final BaseIndex base, final byte qual, final int mappingQuality, final boolean isSoftclip) {
+ switch (base) {
+ case A: ++count_A; sumQual_A += qual; break;
+ case C: ++count_C; sumQual_C += qual; break;
+ case G: ++count_G; sumQual_G += qual; break;
+ case T: ++count_T; sumQual_T += qual; break;
+ case D: ++count_D; sumQual_D += qual; break;
+ case I: ++count_I; sumQual_I += qual; break;
+ case N: ++count_N; sumQual_N += qual; break;
+ }
+ ++totalCount;
+ nSoftClippedBases += isSoftclip ? 1 : 0;
+ mappingQualities.add(mappingQuality);
}
@Ensures("totalCount() == old(totalCount()) || totalCount() == old(totalCount()) - 1")
public void decr(final byte base) {
- final BaseIndex i = BaseIndex.byteToBase(base);
- counts[i.index]--;
- totalCount--;
+ add(BaseIndex.byteToBase(base), -1);
+ }
+
+ private void add(final BaseIndex base, int amount) {
+ switch(base) {
+ case A: count_A += amount; break;
+ case C: count_C += amount; break;
+ case G: count_G += amount; break;
+ case T: count_T += amount; break;
+ case D: count_D += amount; break;
+ case I: count_I += amount; break;
+ case N: count_N += amount; break;
+ }
+ totalCount += amount;
}
@Ensures("totalCount() == old(totalCount()) || totalCount() == old(totalCount()) - 1")
- public void decr(final BaseIndex base, final byte qual) {
- counts[base.index]--;
- totalCount--;
- sumQuals[base.index] -= qual;
+ public void decr(final BaseIndex base, final byte qual, final int mappingQuality, final boolean isSoftclip) {
+ switch (base) {
+ case A: --count_A; sumQual_A -= qual; break;
+ case C: --count_C; sumQual_C -= qual; break;
+ case G: --count_G; sumQual_G -= qual; break;
+ case T: --count_T; sumQual_T -= qual; break;
+ case D: --count_D; sumQual_D -= qual; break;
+ case I: --count_I; sumQual_I -= qual; break;
+ case N: --count_N; sumQual_N -= qual; break;
+ }
+ --totalCount;
+ nSoftClippedBases -= isSoftclip ? 1 : 0;
+ mappingQualities.remove((Integer) mappingQuality);
}
@Ensures("result >= 0")
@@ -135,7 +185,16 @@ import com.google.java.contract.Requires;
@Ensures("result >= 0")
public long getSumQuals(final BaseIndex base) {
- return sumQuals[base.index];
+ switch (base) {
+ case A: return sumQual_A;
+ case C: return sumQual_C;
+ case G: return sumQual_G;
+ case T: return sumQual_T;
+ case D: return sumQual_D;
+ case I: return sumQual_I;
+ case N: return sumQual_N;
+ default: throw new IllegalArgumentException(base.name());
+ }
}
@Ensures("result >= 0")
@@ -155,12 +214,21 @@ import com.google.java.contract.Requires;
@Ensures("result >= 0")
public int countOfBase(final BaseIndex base) {
- return counts[base.index];
+ switch (base) {
+ case A: return count_A;
+ case C: return count_C;
+ case G: return count_G;
+ case T: return count_T;
+ case D: return count_D;
+ case I: return count_I;
+ case N: return count_N;
+ default: throw new IllegalArgumentException(base.name());
+ }
}
@Ensures("result >= 0")
public long sumQualsOfBase(final BaseIndex base) {
- return sumQuals[base.index];
+ return getSumQuals(base);
}
@Ensures("result >= 0")
@@ -168,12 +236,25 @@ import com.google.java.contract.Requires;
return (byte) (sumQualsOfBase(base) / countOfBase(base));
}
+ @Ensures("result >= 0")
+ public int nSoftclips() {
+ return nSoftClippedBases;
+ }
@Ensures("result >= 0")
public int totalCount() {
return totalCount;
}
+ /**
+ * The RMS of the mapping qualities of all reads that contributed to this object
+ *
+ * @return the RMS of the mapping qualities of all reads that contributed to this object
+ */
+ public double getRMS() {
+ return MathUtils.rms(mappingQualities);
+ }
+
/**
* Given a base , it returns the proportional count of this base compared to all other bases
*
@@ -193,14 +274,14 @@ import com.google.java.contract.Requires;
*/
@Ensures({"result >=0.0", "result<= 1.0"})
public double baseCountProportion(final BaseIndex baseIndex) {
- return (totalCount == 0) ? 0.0 : (double)counts[baseIndex.index] / (double)totalCount;
+ return (totalCount == 0) ? 0.0 : (double)countOfBase(baseIndex) / (double)totalCount;
}
@Ensures("result != null")
public String toString() {
StringBuilder b = new StringBuilder();
for (final BaseIndex i : BaseIndex.values()) {
- b.append(i.toString()).append("=").append(counts[i.index]).append(",");
+ b.append(i.toString()).append("=").append(countOfBase(i)).append(",");
}
return b.toString();
}
@@ -209,22 +290,42 @@ import com.google.java.contract.Requires;
return baseIndexWithMostCounts().getByte();
}
+ /**
+ * @return the base index for which the count is highest, including indel indexes
+ */
@Ensures("result != null")
public BaseIndex baseIndexWithMostCounts() {
- BaseIndex maxI = MAX_BASE_INDEX_WITH_NO_COUNTS;
- for (final BaseIndex i : BaseIndex.values()) {
- if (counts[i.index] > counts[maxI.index])
- maxI = i;
- }
- return maxI;
+ return baseIndexWithMostCounts(true);
}
+ /**
+ * @return the base index for which the count is highest, excluding indel indexes
+ */
@Ensures("result != null")
public BaseIndex baseIndexWithMostCountsWithoutIndels() {
+ return baseIndexWithMostCounts(false);
+ }
+
+ /**
+ * Finds the base index with the most counts
+ *
+ * @param allowIndels should we allow base indexes representing indels?
+ * @return non-null base index
+ */
+ @Ensures("result != null")
+ protected BaseIndex baseIndexWithMostCounts(final boolean allowIndels) {
BaseIndex maxI = MAX_BASE_INDEX_WITH_NO_COUNTS;
+ int maxCount = countOfBase(maxI);
+
for (final BaseIndex i : BaseIndex.values()) {
- if (i.isNucleotide() && counts[i.index] > counts[maxI.index])
+ if ( !allowIndels && !i.isNucleotide() )
+ continue;
+
+ final int myCount = countOfBase(i);
+ if (myCount > maxCount) {
maxI = i;
+ maxCount = myCount;
+ }
}
return maxI;
}
@@ -235,27 +336,41 @@ import com.google.java.contract.Requires;
@Ensures("result != null")
public BaseIndex baseIndexWithMostProbability() {
- BaseIndex maxI = MAX_BASE_INDEX_WITH_NO_COUNTS;
- for (final BaseIndex i : BaseIndex.values()) {
- if (sumQuals[i.index] > sumQuals[maxI.index])
- maxI = i;
- }
- return (sumQuals[maxI.index] > 0L ? maxI : baseIndexWithMostCounts());
+ return baseIndexWithMostProbability(true);
}
@Ensures("result != null")
public BaseIndex baseIndexWithMostProbabilityWithoutIndels() {
+ return baseIndexWithMostProbability(false);
+ }
+
+ /**
+ * Finds the base index with the most probability
+ *
+ * @param allowIndels should we allow base indexes representing indels?
+ * @return non-null base index
+ */
+ @Ensures("result != null")
+ public BaseIndex baseIndexWithMostProbability(final boolean allowIndels) {
BaseIndex maxI = MAX_BASE_INDEX_WITH_NO_COUNTS;
+ long maxSum = getSumQuals(maxI);
+
for (final BaseIndex i : BaseIndex.values()) {
- if (i.isNucleotide() && sumQuals[i.index] > sumQuals[maxI.index])
+ if ( !allowIndels && !i.isNucleotide() )
+ continue;
+
+ final long mySum = getSumQuals(i);
+ if (mySum > maxSum) {
maxI = i;
+ maxSum = mySum;
+ }
}
- return (sumQuals[maxI.index] > 0L ? maxI : baseIndexWithMostCountsWithoutIndels());
+ return (maxSum > 0L ? maxI : baseIndexWithMostCounts(allowIndels));
}
@Ensures("result >=0")
public int totalCountWithoutIndels() {
- return totalCount - counts[BaseIndex.D.index] - counts[BaseIndex.I.index];
+ return totalCount - countOfBase(BaseIndex.D) - countOfBase(BaseIndex.I);
}
/**
@@ -268,10 +383,29 @@ import com.google.java.contract.Requires;
@Ensures({"result >=0.0", "result<= 1.0"})
public double baseCountProportionWithoutIndels(final BaseIndex base) {
final int total = totalCountWithoutIndels();
- return (total == 0) ? 0.0 : (double)counts[base.index] / (double)total;
+ return (total == 0) ? 0.0 : (double)countOfBase(base) / (double)total;
}
- public int[] countsArray() {
- return counts.clone();
+ /**
+ * @return true if this instance represents low quality bases
+ */
+ public boolean isLowQuality() { return isLowQuality; }
+
+ /**
+ * Sets the low quality value
+ *
+ * @param value true if this instance represents low quality bases false otherwise
+ */
+ public void setLowQuality(final boolean value) { isLowQuality = value; }
+
+ /**
+ * Clears out all stored data in this object
+ */
+ public void clear() {
+ count_A = count_C = count_G = count_T = count_D = count_I = count_N = 0;
+ sumQual_A = sumQual_C = sumQual_G = sumQual_T = sumQual_D = sumQual_I = sumQual_N = 0;
+ totalCount = 0;
+ nSoftClippedBases = 0;
+ mappingQualities.clear();
}
}
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseIndex.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseIndex.java
index e41878a0b..665e3e7ce 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseIndex.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/BaseIndex.java
@@ -121,7 +121,7 @@ public enum BaseIndex {
*
* @return whether or not it is a nucleotide, given the definition above
*/
- public boolean isNucleotide() {
+ public final boolean isNucleotide() {
return !isIndel();
}
@@ -130,7 +130,7 @@ public enum BaseIndex {
*
* @return true for I or D, false otherwise
*/
- public boolean isIndel() {
+ public final boolean isIndel() {
return this == D || this == I;
}
}
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompareBAM.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompareBAM.java
index a8a765ddc..36da92b4f 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompareBAM.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompareBAM.java
@@ -69,15 +69,15 @@ import java.util.Map;
*
* This is a test walker used for asserting that the ReduceReads procedure is not making blatant mistakes when compressing bam files.
*
- * Input
+ * Input
*
* Two BAM files (using -I) with different read group IDs
*
- * Output
+ * Output
*
* [Output description]
*
- * Examples
+ * Examples
*
* java
* -jar GenomeAnalysisTK.jar
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompressionStash.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompressionStash.java
index bd7bdfe89..22ea78521 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompressionStash.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/CompressionStash.java
@@ -46,10 +46,12 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import it.unimi.dsi.fastutil.objects.ObjectAVLTreeSet;
+import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import org.broadinstitute.sting.utils.*;
import java.util.Collection;
-import java.util.TreeSet;
+
/**
* A stash of regions that must be kept uncompressed in all samples
@@ -61,7 +63,7 @@ import java.util.TreeSet;
* Date: 10/15/12
* Time: 4:08 PM
*/
-public class CompressionStash extends TreeSet {
+public class CompressionStash extends ObjectAVLTreeSet {
public CompressionStash() {
super();
}
@@ -75,7 +77,7 @@ public class CompressionStash extends TreeSet {
*/
@Override
public boolean add(final FinishedGenomeLoc insertLoc) {
- TreeSet removedLocs = new TreeSet();
+ ObjectSortedSet removedLocs = new ObjectAVLTreeSet();
for (FinishedGenomeLoc existingLoc : this) {
if (existingLoc.isPast(insertLoc)) {
break; // if we're past the loc we're done looking for overlaps.
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/HeaderElement.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/HeaderElement.java
index 83efaa254..38b9e957b 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/HeaderElement.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/HeaderElement.java
@@ -46,10 +46,10 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
-import java.util.LinkedList;
/**
* The element that describes the header of the sliding window.
@@ -62,9 +62,9 @@ public class HeaderElement {
private BaseAndQualsCounts consensusBaseCounts; // How many A,C,G,T (and D's) are in this site.
private BaseAndQualsCounts filteredBaseCounts; // How many A,C,G,T (and D's) were filtered out in this site.
private int insertionsToTheRight; // How many reads in this site had insertions to the immediate right
- private int nSoftClippedBases; // How many bases in this site came from soft clipped bases
private int location; // Genome location of this site (the sliding window knows which contig we're at
- private LinkedList mappingQuality; // keeps the mapping quality of each read that contributed to this element (site)
+
+ protected static final int MIN_COUNT_FOR_USING_PVALUE = 2;
public int getLocation() {
return location;
@@ -85,7 +85,7 @@ public class HeaderElement {
* @param location the reference location for the new element
*/
public HeaderElement(final int location) {
- this(new BaseAndQualsCounts(), new BaseAndQualsCounts(), 0, 0, location, new LinkedList());
+ this(new BaseAndQualsCounts(), new BaseAndQualsCounts(), 0, location);
}
/**
@@ -95,7 +95,7 @@ public class HeaderElement {
* @param location the reference location for the new element
*/
public HeaderElement(final int location, final int insertionsToTheRight) {
- this(new BaseAndQualsCounts(), new BaseAndQualsCounts(), insertionsToTheRight, 0, location, new LinkedList());
+ this(new BaseAndQualsCounts(), new BaseAndQualsCounts(), insertionsToTheRight, location);
}
/**
@@ -104,55 +104,67 @@ public class HeaderElement {
* @param consensusBaseCounts the BaseCounts object for the running consensus synthetic read
* @param filteredBaseCounts the BaseCounts object for the filtered data synthetic read
* @param insertionsToTheRight number of insertions to the right of this HeaderElement
- * @param nSoftClippedBases number of softclipped bases of this HeaderElement
* @param location the reference location of this reference element
- * @param mappingQuality the list of mapping quality values of all reads that contributed to this
* HeaderElement
*/
- public HeaderElement(BaseAndQualsCounts consensusBaseCounts, BaseAndQualsCounts filteredBaseCounts, int insertionsToTheRight, int nSoftClippedBases, int location, LinkedList mappingQuality) {
+ public HeaderElement(BaseAndQualsCounts consensusBaseCounts, BaseAndQualsCounts filteredBaseCounts, int insertionsToTheRight, int location) {
this.consensusBaseCounts = consensusBaseCounts;
this.filteredBaseCounts = filteredBaseCounts;
this.insertionsToTheRight = insertionsToTheRight;
- this.nSoftClippedBases = nSoftClippedBases;
this.location = location;
- this.mappingQuality = mappingQuality;
}
/**
* Whether or not the site represented by this HeaderElement is variant according to the definitions of variant
* by insertion, deletion and mismatches.
*
+ * @param minVariantPvalue min p-value for deciding that a position is or is not variable due to mismatches
+ * @param minVariantProportion min proportion for deciding that a position is or is not variable due to mismatches
+ * @param minIndelProportion min proportion for deciding that a position is or is not variable due to indels
* @return true if site is variant by any definition. False otherwise.
*/
- public boolean isVariant(double minVariantProportion, double minIndelProportion) {
- return hasConsensusData() && (isVariantFromInsertions(minIndelProportion) || isVariantFromMismatches(minVariantProportion) || isVariantFromDeletions(minIndelProportion) || isVariantFromSoftClips());
+ public boolean isVariant(final double minVariantPvalue, final double minVariantProportion, final double minIndelProportion) {
+ return hasConsensusData() && (isVariantFromInsertions(minIndelProportion) || isVariantFromMismatches(minVariantPvalue, minVariantProportion) || isVariantFromDeletions(minIndelProportion) || isVariantFromSoftClips());
}
/**
* Adds a new base to the HeaderElement updating all counts accordingly
*
- * @param base the base to add
+ * @param base the base to add
* @param baseQual the base quality
+ * @param insQual the base insertion quality
+ * @param delQual the base deletion quality
* @param baseMappingQuality the mapping quality of the read this base belongs to
+ * @param minBaseQual the minimum base qual allowed to be a good base
+ * @param minMappingQual the minimum mapping qual allowed to be a good read
+ * @param isSoftClipped true if the base is soft-clipped in the original read
*/
public void addBase(byte base, byte baseQual, byte insQual, byte delQual, int baseMappingQuality, int minBaseQual, int minMappingQual, boolean isSoftClipped) {
- if (basePassesFilters(baseQual, minBaseQual, baseMappingQuality, minMappingQual))
- consensusBaseCounts.incr(base, baseQual, insQual, delQual); // If the base passes filters, it is included in the consensus base counts
+ // If the base passes the MQ filter it is included in the consensus base counts, otherwise it's part of the filtered counts
+ if ( baseMappingQuality >= minMappingQual )
+ consensusBaseCounts.incr(base, baseQual, insQual, delQual, baseMappingQuality, baseQual < minBaseQual, isSoftClipped);
else
- filteredBaseCounts.incr(base, baseQual, insQual, delQual); // If the base fails filters, it is included with the filtered data base counts
-
- this.mappingQuality.add(baseMappingQuality); // Filtered or not, the RMS mapping quality includes all bases in this site
- nSoftClippedBases += isSoftClipped ? 1 : 0; // if this base is softclipped, add the counter
+ filteredBaseCounts.incr(base, baseQual, insQual, delQual, baseMappingQuality, baseQual < minBaseQual);
}
+ /**
+     * Removes a base from the HeaderElement updating all counts accordingly
+     *
+     * @param base the base to remove
+ * @param baseQual the base quality
+ * @param insQual the base insertion quality
+ * @param delQual the base deletion quality
+ * @param baseMappingQuality the mapping quality of the read this base belongs to
+ * @param minBaseQual the minimum base qual allowed to be a good base
+ * @param minMappingQual the minimum mapping qual allowed to be a good read
+ * @param isSoftClipped true if the base is soft-clipped in the original read
+ */
public void removeBase(byte base, byte baseQual, byte insQual, byte delQual, int baseMappingQuality, int minBaseQual, int minMappingQual, boolean isSoftClipped) {
- if (basePassesFilters(baseQual, minBaseQual, baseMappingQuality, minMappingQual))
- consensusBaseCounts.decr(base, baseQual, insQual, delQual); // If the base passes filters, it is included in the consensus base counts
+ // If the base passes the MQ filter it is included in the consensus base counts, otherwise it's part of the filtered counts
+ if ( baseMappingQuality >= minMappingQual )
+ consensusBaseCounts.decr(base, baseQual, insQual, delQual, baseMappingQuality, baseQual < minBaseQual, isSoftClipped);
else
- filteredBaseCounts.decr(base, baseQual, insQual, delQual); // If the base fails filters, it is included with the filtered data base counts
-
- this.mappingQuality.remove((Integer) baseMappingQuality); // Filtered or not, the RMS mapping quality includes all bases in this site
- nSoftClippedBases -= isSoftClipped ? 1 : 0; // if this base is softclipped, add the counter
+ filteredBaseCounts.decr(base, baseQual, insQual, delQual, baseMappingQuality, baseQual < minBaseQual);
}
/**
* Adds an insertions to the right of the HeaderElement and updates all counts accordingly. All insertions
@@ -189,15 +201,6 @@ public class HeaderElement {
return (!hasFilteredData() && !hasConsensusData());
}
- /**
- * The RMS of the mapping qualities of all reads that contributed to this HeaderElement
- *
- * @return the RMS of the mapping qualities of all reads that contributed to this HeaderElement
- */
- public double getRMS() {
- return MathUtils.rms(mappingQuality);
- }
-
/**
* removes an insertion from this element (if you removed a read that had an insertion)
*/
@@ -232,7 +235,7 @@ public class HeaderElement {
/**
* Whether or not the HeaderElement is variant due to excess deletions
*
- * @return whether or not the HeaderElement is variant due to excess insertions
+ * @return whether or not the HeaderElement is variant due to excess deletions
*/
private boolean isVariantFromDeletions(double minIndelProportion) {
return consensusBaseCounts.baseIndexWithMostCounts() == BaseIndex.D || consensusBaseCounts.baseCountProportion(BaseIndex.D) > minIndelProportion;
@@ -241,12 +244,15 @@ public class HeaderElement {
/**
* Whether or not the HeaderElement is variant due to excess mismatches
*
- * @return whether or not the HeaderElement is variant due to excess insertions
+ * @param minVariantPvalue the minimum pvalue to call a site variant (used with low coverage).
+ * @param minVariantProportion the minimum proportion to call a site variant (used with high coverage).
+ * @return whether or not the HeaderElement is variant due to excess mismatches
*/
- protected boolean isVariantFromMismatches(double minVariantProportion) {
- BaseIndex mostCommon = consensusBaseCounts.baseIndexWithMostProbabilityWithoutIndels();
- double mostCommonProportion = consensusBaseCounts.baseCountProportionWithoutIndels(mostCommon);
- return mostCommonProportion != 0.0 && mostCommonProportion < (1 - minVariantProportion);
+ protected boolean isVariantFromMismatches(final double minVariantPvalue, final double minVariantProportion) {
+ final int totalCount = consensusBaseCounts.totalCountWithoutIndels();
+ final BaseIndex mostCommon = consensusBaseCounts.baseIndexWithMostProbabilityWithoutIndels();
+ final int countOfOtherBases = totalCount - consensusBaseCounts.countOfBase(mostCommon);
+ return hasSignificantCount(countOfOtherBases, totalCount, minVariantPvalue, minVariantProportion);
}
/**
@@ -256,37 +262,88 @@ public class HeaderElement {
* @return true if we had more soft clipped bases contributing to this site than matches/mismatches.
*/
protected boolean isVariantFromSoftClips() {
+ final int nSoftClippedBases = consensusBaseCounts.nSoftclips();
return nSoftClippedBases > 0 && nSoftClippedBases >= (consensusBaseCounts.totalCount() - nSoftClippedBases);
}
- protected boolean basePassesFilters(byte baseQual, int minBaseQual, int baseMappingQuality, int minMappingQual) {
- return baseQual >= minBaseQual && baseMappingQuality >= minMappingQual;
+ /**
+ * Calculates the number of alleles necessary to represent this site.
+ *
+ * @param minVariantPvalue the minimum pvalue to call a site variant.
+ * @param minVariantProportion the minimum proportion to call a site variant.
+ * @return the number of alleles necessary to represent this site or -1 if there are too many indels
+ */
+ public int getNumberOfBaseAlleles(final double minVariantPvalue, final double minVariantProportion) {
+ final ObjectArrayList alleles = getAlleles(minVariantPvalue, minVariantProportion);
+ return alleles == null ? -1 : alleles.size();
}
/**
- * Calculates the number of haplotypes necessary to represent this site.
+ * Calculates the alleles necessary to represent this site.
*
+ * @param minVariantPvalue the minimum pvalue to call a site variant.
* @param minVariantProportion the minimum proportion to call a site variant.
- * @return the number of alleles necessary to represent this site.
+ * @return the list of alleles necessary to represent this site or null if there are too many indels
*/
- public int getNumberOfAlleles(final double minVariantProportion) {
+ public ObjectArrayList getAlleles(final double minVariantPvalue, final double minVariantProportion) {
+ // make sure we have bases at all
final int totalBaseCount = consensusBaseCounts.totalCount();
- if (totalBaseCount == 0)
- return 0;
+ if ( totalBaseCount == 0 )
+ return new ObjectArrayList(0);
- final int minBaseCountForRelevantAlleles = (int)(minVariantProportion * totalBaseCount);
+ // next, check for insertions; technically, the insertion count can be greater than totalBaseCount
+ // (because of the way insertions are counted), so we need to account for that
+ if ( hasSignificantCount(Math.min(totalBaseCount, insertionsToTheRight), totalBaseCount, minVariantPvalue, minVariantProportion) )
+ return null;
- int nAlleles = 0;
- for ( BaseIndex base : BaseIndex.values() ) {
+ // finally, check for the bases themselves (including deletions)
+ final ObjectArrayList alleles = new ObjectArrayList(4);
+ for ( final BaseIndex base : BaseIndex.values() ) {
final int baseCount = consensusBaseCounts.countOfBase(base);
-
- // don't consider this allele if the count is 0
if ( baseCount == 0 )
continue;
- if ( baseCount >= minBaseCountForRelevantAlleles )
- nAlleles++;
+ if ( hasSignificantCount(baseCount, totalBaseCount, minVariantPvalue, minVariantProportion) ) {
+ if ( base == BaseIndex.D )
+ return null;
+ alleles.add(base);
+ }
}
- return nAlleles;
+ return alleles;
+ }
+
+    /**
+ * Checks whether there are a significant number of softclips.
+ *
+ * @param minVariantPvalue the minimum pvalue to call a site variant.
+ * @param minVariantProportion the minimum proportion to call a site variant.
+ * @return true if there are significant softclips, false otherwise
+ */
+ public boolean hasSignificantSoftclips(final double minVariantPvalue, final double minVariantProportion) {
+ return hasSignificantCount(consensusBaseCounts.nSoftclips(), consensusBaseCounts.totalCount(), minVariantPvalue, minVariantProportion);
+ }
+
+    /**
+     * Checks whether the given count is statistically significant relative to the total.
+ *
+ * @param count the count (k) to test against
+ * @param total the total (n) to test against
+ * @param minVariantPvalue the minimum pvalue to call a site variant.
+ * @param minVariantProportion the minimum proportion to call a site variant.
+ * @return true if there is a significant count given the provided pvalue, false otherwise
+ */
+ private boolean hasSignificantCount(final int count, final int total, final double minVariantPvalue, final double minVariantProportion) {
+ if ( count == 0 || total == 0 )
+ return false;
+
+ // use p-values for low counts of k
+ if ( count <= MIN_COUNT_FOR_USING_PVALUE ) {
+ final double pvalue = MathUtils.binomialCumulativeProbability(total, 0, count);
+ return pvalue > minVariantPvalue;
+ }
+
+ // otherwise, use straight proportions
+ final int minBaseCountForSignificance = (int)(minVariantProportion * total);
+ return count >= minBaseCountForSignificance;
}
}
\ No newline at end of file
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/MultiSampleCompressor.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/MultiSampleCompressor.java
index d45efeb65..bdd407fba 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/MultiSampleCompressor.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/MultiSampleCompressor.java
@@ -46,18 +46,17 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import com.google.java.contract.Ensures;
+import it.unimi.dsi.fastutil.objects.*;
import net.sf.samtools.SAMFileHeader;
import org.apache.log4j.Logger;
+import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.SampleUtils;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.sam.AlignmentStartWithNoTiesComparator;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
/*
* Copyright (c) 2009 The Broad Institute
@@ -91,52 +90,72 @@ import java.util.TreeSet;
public class MultiSampleCompressor {
protected static final Logger logger = Logger.getLogger(MultiSampleCompressor.class);
- protected Map compressorsPerSample = new HashMap();
+ protected Object2ObjectMap compressorsPerSample = new Object2ObjectOpenHashMap();
public MultiSampleCompressor(SAMFileHeader header,
final int contextSize,
final int downsampleCoverage,
final int minMappingQuality,
+ final double minAltPValueToTriggerVariant,
final double minAltProportionToTriggerVariant,
final double minIndelProportionToTriggerVariant,
final int minBaseQual,
- final ReduceReads.DownsampleStrategy downsampleStrategy,
- final boolean allowPolyploidReduction) {
+ final ReduceReads.DownsampleStrategy downsampleStrategy) {
for ( String name : SampleUtils.getSAMFileSamples(header) ) {
compressorsPerSample.put(name,
new SingleSampleCompressor(contextSize, downsampleCoverage,
- minMappingQuality, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant, minBaseQual, downsampleStrategy, allowPolyploidReduction));
+ minMappingQuality, minAltPValueToTriggerVariant, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant, minBaseQual, downsampleStrategy));
}
}
- public Set addAlignment(GATKSAMRecord read) {
+ /**
+ * Add an alignment to the compressor
+ *
+ * @param read the read to be added
+ * @param knownSnpPositions the set of known SNP positions
+ * @return any compressed reads that may have resulted from adding this read to the machinery (due to the sliding window)
+ */
+ public ObjectSet addAlignment(final GATKSAMRecord read, final ObjectSortedSet knownSnpPositions) {
String sampleName = read.getReadGroup().getSample();
SingleSampleCompressor compressor = compressorsPerSample.get(sampleName);
if ( compressor == null )
throw new ReviewedStingException("No compressor for sample " + sampleName);
- Pair, CompressionStash> readsAndStash = compressor.addAlignment(read);
- Set reads = readsAndStash.getFirst();
+ Pair, CompressionStash> readsAndStash = compressor.addAlignment(read, knownSnpPositions);
+ ObjectSet reads = readsAndStash.getFirst();
CompressionStash regions = readsAndStash.getSecond();
- reads.addAll(closeVariantRegionsInAllSamples(regions));
+ reads.addAll(closeVariantRegionsInAllSamples(regions, knownSnpPositions));
return reads;
}
- public Set close() {
- Set reads = new TreeSet(new AlignmentStartWithNoTiesComparator());
+ /**
+ * Properly closes the compressor.
+ *
+ * @param knownSnpPositions the set of known SNP positions
+ * @return A non-null set/list of all reads generated
+ */
+ @Ensures("result != null")
+ public ObjectSet close(final ObjectSortedSet knownSnpPositions) {
+ ObjectSet reads = new ObjectAVLTreeSet(new AlignmentStartWithNoTiesComparator());
for ( SingleSampleCompressor sample : compressorsPerSample.values() ) {
- Pair, CompressionStash> readsAndStash = sample.close();
- reads = readsAndStash.getFirst();
+ Pair, CompressionStash> readsAndStash = sample.close(knownSnpPositions);
+ reads.addAll(readsAndStash.getFirst());
}
return reads;
}
- private Set closeVariantRegionsInAllSamples(CompressionStash regions) {
- Set reads = new TreeSet(new AlignmentStartWithNoTiesComparator());
+ /**
+ * Finalizes current variant regions.
+ *
+ * @param knownSnpPositions the set of known SNP positions
+ * @return A non-null set/list of all reads generated
+ */
+ private ObjectSet closeVariantRegionsInAllSamples(final CompressionStash regions, final ObjectSortedSet knownSnpPositions) {
+ ObjectSet reads = new ObjectAVLTreeSet(new AlignmentStartWithNoTiesComparator());
if (!regions.isEmpty()) {
for (SingleSampleCompressor sample : compressorsPerSample.values()) {
- reads.addAll(sample.closeVariantRegions(regions));
+ reads.addAll(sample.closeVariantRegions(regions, knownSnpPositions));
}
}
return reads;
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReads.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReads.java
index 8e45f6db1..71910e566 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReads.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReads.java
@@ -46,13 +46,15 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;
+import it.unimi.dsi.fastutil.objects.ObjectAVLTreeSet;
+import it.unimi.dsi.fastutil.objects.ObjectArrayList;
+import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
import net.sf.samtools.SAMFileHeader;
import net.sf.samtools.SAMFileWriter;
import net.sf.samtools.SAMProgramRecord;
import net.sf.samtools.util.SequenceUtil;
-import org.broadinstitute.sting.commandline.Argument;
-import org.broadinstitute.sting.commandline.Hidden;
-import org.broadinstitute.sting.commandline.Output;
+import org.broadinstitute.sting.commandline.*;
import org.broadinstitute.sting.gatk.CommandLineGATK;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
@@ -65,13 +67,17 @@ import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
+import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.help.DocumentedGATKFeature;
import org.broadinstitute.sting.utils.help.HelpConstants;
import org.broadinstitute.sting.utils.sam.BySampleSAMFileWriter;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
+import org.broadinstitute.variant.variantcontext.VariantContext;
+
+import java.util.Collections;
+import java.util.List;
-import java.util.*;
/**
* Reduces the BAM file using read based compression that keeps only essential information for variant calling
@@ -83,17 +89,17 @@ import java.util.*;
* shown to reduce a typical whole exome BAM file 100x. The higher the coverage, the bigger the
* savings in file size and performance of the downstream tools.
*
- * Input
+ * Input
*
* The BAM file to be compressed
*
*
- * Output
+ * Output
*
* The compressed (reduced) BAM file.
*
*
- * Examples
+ * Examples
*
* java -Xmx4g -jar GenomeAnalysisTK.jar \
* -R ref.fasta \
@@ -107,9 +113,9 @@ import java.util.*;
@PartitionBy(PartitionType.CONTIG)
@ReadFilters({UnmappedReadFilter.class, NotPrimaryAlignmentFilter.class, DuplicateReadFilter.class, FailsVendorQualityCheckFilter.class, BadCigarFilter.class})
@Downsample(by=DownsampleType.BY_SAMPLE, toCoverage=40)
-public class ReduceReads extends ReadWalker, ReduceReadsStash> {
+public class ReduceReads extends ReadWalker, ReduceReadsStash> {
- @Output
+ @Output(required = false, defaultToStdout = false)
private StingSAMFileWriter out = null;
private SAMFileWriter writerToUse = null;
@@ -117,7 +123,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
* The number of bases to keep around mismatches (potential variation)
*/
@Argument(fullName = "context_size", shortName = "cs", doc = "", required = false)
- private int contextSize = 10;
+ public int contextSize = 10;
/**
* The minimum mapping quality to be considered for the consensus synthetic read. Reads that have
@@ -125,7 +131,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
* towards variable regions.
*/
@Argument(fullName = "minimum_mapping_quality", shortName = "minmap", doc = "", required = false)
- private int minMappingQuality = 20;
+ public int minMappingQuality = 20;
/**
* The minimum base quality to be considered for the consensus synthetic read. Reads that have
@@ -133,41 +139,45 @@ public class ReduceReads extends ReadWalker, ReduceRea
* towards variable regions.
*/
@Argument(fullName = "minimum_base_quality_to_consider", shortName = "minqual", doc = "", required = false)
- private byte minBaseQual = 20;
+ public byte minBaseQual = 15;
/**
- * Reads have notoriously low quality bases on the tails (left and right). Consecutive bases with quality
- * lower than this threshold will be hard clipped off before entering the reduce reads algorithm.
+ * Reads have notoriously low quality bases on the tails (left and right). Consecutive bases at the tails with
+ * quality at or lower than this threshold will be hard clipped off before entering the reduce reads algorithm.
*/
@Argument(fullName = "minimum_tail_qualities", shortName = "mintail", doc = "", required = false)
- private byte minTailQuality = 2;
+ public byte minTailQuality = 2;
/**
- * Allow the experimental polyploid-based reduction capabilities of this tool
+ * Any number of VCF files representing known SNPs to be used for the polyploid-based reduction.
+ * Could be e.g. dbSNP and/or official 1000 Genomes SNP calls. Non-SNP variants in these files will be ignored.
+ * If provided, the polyploid ("het") compression will work only when a single SNP from the known set is present
+ * in a consensus window (otherwise there will be no reduction); if not provided then polyploid compression will
+ * be triggered anywhere there is a single SNP present in a consensus window.
*/
- @Argument(fullName = "allow_polyploid_reduction", shortName = "polyploid", doc = "", required = false)
- private boolean USE_POLYPLOID_REDUCTION = false;
+ @Input(fullName="known_sites_for_polyploid_reduction", shortName = "known", doc="Input VCF file(s) with known SNPs", required=false)
+ public List> known = Collections.emptyList();
/**
* Do not simplify read (strip away all extra information of the read -- anything other than bases, quals
* and read group).
*/
@Argument(fullName = "dont_simplify_reads", shortName = "nosimplify", doc = "", required = false)
- private boolean DONT_SIMPLIFY_READS = false;
+ public boolean DONT_SIMPLIFY_READS = false;
/**
* Do not hard clip adaptor sequences. Note: You don't have to turn this on for reads that are not mate paired.
* The program will behave correctly in those cases.
*/
@Argument(fullName = "dont_hardclip_adaptor_sequences", shortName = "noclip_ad", doc = "", required = false)
- private boolean DONT_CLIP_ADAPTOR_SEQUENCES = false;
+ public boolean DONT_CLIP_ADAPTOR_SEQUENCES = false;
/**
* Do not hard clip the low quality tails of the reads. This option overrides the argument of minimum tail
* quality.
*/
@Argument(fullName = "dont_hardclip_low_qual_tails", shortName = "noclip_tail", doc = "", required = false)
- private boolean DONT_CLIP_LOW_QUAL_TAILS = false;
+ public boolean DONT_CLIP_LOW_QUAL_TAILS = false;
/**
* Do not use high quality soft-clipped bases. By default, ReduceReads will hard clip away any low quality soft clipped
@@ -175,7 +185,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
* regions. The minimum quality for soft clipped bases is the same as the minimum base quality to consider (minqual)
*/
@Argument(fullName = "dont_use_softclipped_bases", shortName = "no_soft", doc = "", required = false)
- private boolean DONT_USE_SOFTCLIPPED_BASES = false;
+ public boolean DONT_USE_SOFTCLIPPED_BASES = false;
/**
* Do not compress read names. By default, ReduceReads will compress read names to numbers and guarantee
@@ -183,55 +193,68 @@ public class ReduceReads extends ReadWalker, ReduceRea
* there is no guarantee that read name uniqueness will be maintained -- in this case we recommend not compressing.
*/
@Argument(fullName = "dont_compress_read_names", shortName = "nocmp_names", doc = "", required = false)
- private boolean DONT_COMPRESS_READ_NAMES = false;
+ public boolean DONT_COMPRESS_READ_NAMES = false;
/**
* Optionally hard clip all incoming reads to the desired intervals. The hard clips will happen exactly at the interval
* border.
*/
@Argument(fullName = "hard_clip_to_interval", shortName = "clip_int", doc = "", required = false)
- private boolean HARD_CLIP_TO_INTERVAL = false;
+ public boolean HARD_CLIP_TO_INTERVAL = false;
/**
* Minimum proportion of mismatches in a site to trigger a variant region. Anything below this will be
- * considered consensus.
+ * considered consensus and reduced (otherwise we will try to trigger polyploid compression). Note that
+     * this value is used only in regions with high coverage.
*/
+ @Advanced
@Argument(fullName = "minimum_alt_proportion_to_trigger_variant", shortName = "minvar", doc = "", required = false)
- private double minAltProportionToTriggerVariant = 0.05;
+ public double minAltProportionToTriggerVariant = 0.05;
+
+ /**
+ * Minimum p-value from binomial distribution of mismatches in a site to trigger a variant region.
+ * Any site with a value falling below this will be considered consensus and reduced (otherwise we will try to
+     * trigger polyploid compression). Note that this value is used only in regions with low coverage.
+ */
+ @Advanced
+ @Argument(fullName = "minimum_alt_pvalue_to_trigger_variant", shortName = "min_pvalue", doc = "", required = false)
+ public double minAltPValueToTriggerVariant = 0.01;
/**
* Minimum proportion of indels in a site to trigger a variant region. Anything below this will be
* considered consensus.
*/
@Argument(fullName = "minimum_del_proportion_to_trigger_variant", shortName = "mindel", doc = "", required = false)
- private double minIndelProportionToTriggerVariant = 0.05;
+ public double minIndelProportionToTriggerVariant = 0.05;
/**
- * Downsamples the coverage of a variable region approximately (guarantees the minimum to be equal to this).
+ * The number of reads emitted per sample in a variant region can be downsampled for better compression.
+ * This level of downsampling only happens after the region has been evaluated, therefore it can
+ * be combined with the engine level downsampling.
* A value of 0 turns downsampling off.
*/
@Argument(fullName = "downsample_coverage", shortName = "ds", doc = "", required = false)
- private int downsampleCoverage = 250;
+ public int downsampleCoverage = 250;
@Hidden
@Argument(fullName = "nwayout", shortName = "nw", doc = "", required = false)
- private boolean nwayout = false;
+ public boolean nwayout = false;
@Hidden
@Argument(fullName = "", shortName = "dl", doc = "", required = false)
- private int debugLevel = 0;
+ public int debugLevel = 0;
@Hidden
@Argument(fullName = "", shortName = "dr", doc = "", required = false)
- private String debugRead = "";
+ public String debugRead = "";
@Hidden
@Argument(fullName = "downsample_strategy", shortName = "dm", doc = "", required = false)
- private DownsampleStrategy downsampleStrategy = DownsampleStrategy.Normal;
+ public DownsampleStrategy downsampleStrategy = DownsampleStrategy.Normal;
@Hidden
@Argument(fullName = "no_pg_tag", shortName = "npt", doc ="", required = false)
- private boolean NO_PG_TAG = false;
+ public boolean NO_PG_TAG = false;
public enum DownsampleStrategy {
Normal,
@@ -240,10 +263,12 @@ public class ReduceReads extends ReadWalker, ReduceRea
int nCompressedReads = 0;
- HashMap readNameHash; // This hash will keep the name of the original read the new compressed name (a number).
+ Object2LongOpenHashMap readNameHash; // This hash will keep the name of the original read the new compressed name (a number).
Long nextReadNumber = 1L; // The next number to use for the compressed read name.
- SortedSet intervalList;
+ ObjectSortedSet intervalList;
+
+ ObjectSortedSet knownSnpPositions;
// IMPORTANT: DO NOT CHANGE THE VALUE OF THIS CONSTANT VARIABLE; IT IS NOW PERMANENTLY THE @PG NAME THAT EXTERNAL TOOLS LOOK FOR IN THE BAM HEADER
public static final String PROGRAM_RECORD_NAME = "GATK ReduceReads"; // The name that will go in the @PG tag
@@ -256,17 +281,33 @@ public class ReduceReads extends ReadWalker, ReduceRea
@Override
public void initialize() {
super.initialize();
+
+ if ( !nwayout && out == null )
+ throw new UserException.MissingArgument("out", "the output must be provided and is optional only for certain debugging modes");
+
+ if ( nwayout && out != null )
+ throw new UserException.CommandLineException("--out and --nwayout can not be used simultaneously; please use one or the other");
+
+ if ( minAltPValueToTriggerVariant < 0.0 || minAltPValueToTriggerVariant > 1.0 )
+ throw new UserException.BadArgumentValue("--minimum_alt_pvalue_to_trigger_variant", "must be a value between 0 and 1 (inclusive)");
+
+ if ( minAltProportionToTriggerVariant < 0.0 || minAltProportionToTriggerVariant > 1.0 )
+ throw new UserException.BadArgumentValue("--minimum_alt_proportion_to_trigger_variant", "must be a value between 0 and 1 (inclusive)");
+
+ if ( known.isEmpty() )
+ knownSnpPositions = null;
+ else
+ knownSnpPositions = new ObjectAVLTreeSet();
+
GenomeAnalysisEngine toolkit = getToolkit();
- readNameHash = new HashMap(); // prepare the read name hash to keep track of what reads have had their read names compressed
- intervalList = new TreeSet(); // get the interval list from the engine. If no interval list was provided, the walker will work in WGS mode
+ readNameHash = new Object2LongOpenHashMap(100000); // prepare the read name hash to keep track of what reads have had their read names compressed
+ intervalList = new ObjectAVLTreeSet(); // get the interval list from the engine. If no interval list was provided, the walker will work in WGS mode
if (toolkit.getIntervals() != null)
intervalList.addAll(toolkit.getIntervals());
-
final boolean preSorted = true;
final boolean indexOnTheFly = true;
- final boolean keep_records = true;
final SAMFileHeader.SortOrder sortOrder = SAMFileHeader.SortOrder.coordinate;
if (nwayout) {
SAMProgramRecord programRecord = NO_PG_TAG ? null : Utils.createProgramRecord(toolkit, this, PROGRAM_RECORD_NAME);
@@ -276,7 +317,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
writerToUse = out;
out.setPresorted(false);
if (!NO_PG_TAG) {
- Utils.setupWriter(out, toolkit, toolkit.getSAMFileHeader(), !preSorted, keep_records, this, PROGRAM_RECORD_NAME);
+ Utils.setupWriter(out, toolkit, toolkit.getSAMFileHeader(), !preSorted, this, PROGRAM_RECORD_NAME);
}
}
}
@@ -295,8 +336,8 @@ public class ReduceReads extends ReadWalker, ReduceRea
* @return a linked list with all the reads produced by the clipping operations
*/
@Override
- public LinkedList map(ReferenceContext ref, GATKSAMRecord read, RefMetaDataTracker metaDataTracker) {
- LinkedList mappedReads;
+ public ObjectArrayList map(ReferenceContext ref, GATKSAMRecord read, RefMetaDataTracker metaDataTracker) {
+ ObjectArrayList mappedReads;
if (!debugRead.isEmpty() && read.getReadName().contains(debugRead))
System.out.println("Found debug read!");
@@ -325,18 +366,18 @@ public class ReduceReads extends ReadWalker, ReduceRea
if (HARD_CLIP_TO_INTERVAL)
mappedReads = hardClipReadToInterval(read); // Hard clip the remainder of the read to the desired interval
else {
- mappedReads = new LinkedList();
+ mappedReads = new ObjectArrayList();
mappedReads.add(read);
}
}
else {
- mappedReads = new LinkedList();
+ mappedReads = new ObjectArrayList();
if (!read.isEmpty())
mappedReads.add(read);
}
if (!mappedReads.isEmpty() && !DONT_USE_SOFTCLIPPED_BASES) {
- LinkedList tempList = new LinkedList();
+ ObjectArrayList tempList = new ObjectArrayList();
for (GATKSAMRecord mRead : mappedReads) {
GATKSAMRecord clippedRead = ReadClipper.hardClipLowQualitySoftClips(mRead, minBaseQual);
if (!clippedRead.isEmpty())
@@ -349,8 +390,22 @@ public class ReduceReads extends ReadWalker, ReduceRea
for (GATKSAMRecord mappedRead : mappedReads)
System.out.printf("MAPPED: %s %d %d\n", mappedRead.getCigar(), mappedRead.getAlignmentStart(), mappedRead.getAlignmentEnd());
- return mappedReads;
+ // add the SNPs to the list of known positions
+ populateKnownSNPs(metaDataTracker);
+ return mappedReads;
+ }
+
+ /*
+ * Add the positions of known SNPs to the set so that we can keep track of it
+ *
+ * @param metaDataTracker the ref meta data tracker
+ */
+ protected void populateKnownSNPs(final RefMetaDataTracker metaDataTracker) {
+ for ( final VariantContext vc : metaDataTracker.getValues(known) ) {
+ if ( vc.isSNP() )
+ knownSnpPositions.add(getToolkit().getGenomeLocParser().createGenomeLoc(vc));
+ }
}
/**
@@ -363,7 +418,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
*/
@Override
public ReduceReadsStash reduceInit() {
- return new ReduceReadsStash(new MultiSampleCompressor(getToolkit().getSAMFileHeader(), contextSize, downsampleCoverage, minMappingQuality, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant, minBaseQual, downsampleStrategy, USE_POLYPLOID_REDUCTION));
+ return new ReduceReadsStash(new MultiSampleCompressor(getToolkit().getSAMFileHeader(), contextSize, downsampleCoverage, minMappingQuality, minAltPValueToTriggerVariant, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant, minBaseQual, downsampleStrategy));
}
/**
@@ -375,7 +430,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
* @param stash the stash that keeps the reads in order for processing
* @return the stash with all reads that have not been processed yet
*/
- public ReduceReadsStash reduce(LinkedList mappedReads, ReduceReadsStash stash) {
+ public ReduceReadsStash reduce(ObjectArrayList mappedReads, ReduceReadsStash stash) {
if (debugLevel == 1)
stash.print();
@@ -387,7 +442,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
throw new ReviewedStingException("Empty read sent to reduce, this should never happen! " + read.getReadName() + " -- " + read.getCigar() + " -- " + read.getReferenceName() + ":" + read.getAlignmentStart() + "-" + read.getAlignmentEnd());
if (originalRead) {
- List readsReady = new LinkedList();
+ ObjectArrayList readsReady = new ObjectArrayList();
readsReady.addAll(stash.getAllReadsBefore(read));
readsReady.add(read);
@@ -395,9 +450,16 @@ public class ReduceReads extends ReadWalker, ReduceRea
if (debugLevel == 1)
System.out.println("REDUCE: " + readReady.getCigar() + " " + readReady.getAlignmentStart() + " " + readReady.getAlignmentEnd());
- for (GATKSAMRecord compressedRead : stash.compress(readReady))
+ for (GATKSAMRecord compressedRead : stash.compress(readReady, knownSnpPositions))
outputRead(compressedRead);
+ // We only care about maintaining the link between read pairs if they are in the same variant
+ // region. Since an entire variant region's worth of reads is returned in a single call to
+ // stash.compress(), the readNameHash can be cleared after the for() loop above.
+ // The advantage of clearing the hash is that otherwise it holds all reads that have been encountered,
+ // which can use a lot of memory and cause RR to slow to a crawl and/or run out of memory.
+ readNameHash.clear();
+
}
} else
stash.add(read);
@@ -405,6 +467,10 @@ public class ReduceReads extends ReadWalker, ReduceRea
firstRead = false;
}
+ // reduce memory requirements by removing old positions
+ if ( !mappedReads.isEmpty() )
+ clearStaleKnownPositions(mappedReads.get(0));
+
return stash;
}
@@ -417,13 +483,38 @@ public class ReduceReads extends ReadWalker, ReduceRea
public void onTraversalDone(ReduceReadsStash stash) {
// output any remaining reads in the compressor
- for (GATKSAMRecord read : stash.close())
+ for (GATKSAMRecord read : stash.close(knownSnpPositions))
outputRead(read);
if (nwayout)
writerToUse.close();
}
+ /**
+ * Removes known positions that are no longer relevant for use with het compression.
+ *
+ * @param read the current read, used for checking whether there are stale positions we can remove
+ */
+ protected void clearStaleKnownPositions(final GATKSAMRecord read) {
+ // nothing to clear if not used or empty
+ if ( knownSnpPositions == null || knownSnpPositions.isEmpty() )
+ return;
+
+ // not ready to be cleared until we encounter a read from a different contig
+ final int contigIndexOfRead = read.getReferenceIndex();
+ if ( knownSnpPositions.first().getContigIndex() == contigIndexOfRead )
+ return;
+
+ // because we expect most elements to be stale, it's not going to be efficient to remove them one at a time
+ final ObjectAVLTreeSet goodLocs = new ObjectAVLTreeSet();
+ for ( final GenomeLoc loc : knownSnpPositions ) {
+ if ( loc.getContigIndex() == contigIndexOfRead )
+ goodLocs.add(loc);
+ }
+ knownSnpPositions.clear();
+ knownSnpPositions.addAll(goodLocs);
+ }
+
/**
* Hard clips away all parts of the read that doesn't agree with the intervals selected.
*
@@ -433,8 +524,8 @@ public class ReduceReads extends ReadWalker, ReduceRea
* @param read the read to be hard clipped to the interval.
* @return a shallow copy of the read hard clipped to the interval
*/
- private LinkedList hardClipReadToInterval(GATKSAMRecord read) {
- LinkedList clippedReads = new LinkedList();
+ private ObjectArrayList hardClipReadToInterval(GATKSAMRecord read) {
+ ObjectArrayList clippedReads = new ObjectArrayList();
GenomeLoc intervalOverlapped = null; // marks the interval to which the original read overlapped (so we can cut all previous intervals from the list)
@@ -588,7 +679,7 @@ public class ReduceReads extends ReadWalker, ReduceRea
System.out.println("BAM: " + read.getCigar() + " " + read.getAlignmentStart() + " " + read.getAlignmentEnd());
if (!DONT_COMPRESS_READ_NAMES)
- compressReadName(read);
+ nextReadNumber = compressReadName(readNameHash, read, nextReadNumber);
writerToUse.addAlignment(read);
}
@@ -623,21 +714,28 @@ public class ReduceReads extends ReadWalker, ReduceRea
* Compresses the read name using the readNameHash if we have already compressed
* this read name before.
*
- * @param read any read
+ * @param hash the hash table containing the read name to compressed read name map
+ * @param read any read
+ * @param nextReadNumber the number to use in the compressed read name in case this is a new read name
+ * @return the next number to use in the compressed read name
*/
- private void compressReadName(GATKSAMRecord read) {
- String name = read.getReadName();
- String compressedName = read.isReducedRead() ? "C" : "";
- final Long readNumber = readNameHash.get(name);
- if (readNumber != null) {
- compressedName += readNumber.toString();
- } else {
- readNameHash.put(name, nextReadNumber);
- compressedName += nextReadNumber.toString();
- nextReadNumber++;
+ protected static long compressReadName(final Object2LongOpenHashMap hash, final GATKSAMRecord read, final long nextReadNumber) {
+ final String name = read.getReadName();
+ final StringBuilder compressedName = new StringBuilder();
+ long result = nextReadNumber;
+ if (read.isReducedRead()) {
+ compressedName.append("C");
}
-
- read.setReadName(compressedName);
+ final Long readNumber = hash.get(name);
+ if (readNumber != null) {
+ compressedName.append(readNumber);
+ } else {
+ hash.put(name, nextReadNumber);
+ compressedName.append(nextReadNumber);
+ result++;
+ }
+ read.setReadName(compressedName.toString());
+ return result;
}
/**
@@ -649,8 +747,8 @@ public class ReduceReads extends ReadWalker, ReduceRea
* @param read the read
* @return Returns true if the read is the original read that went through map().
*/
- private boolean isOriginalRead(LinkedList list, GATKSAMRecord read) {
- return isWholeGenome() || list.getFirst().equals(read);
+ private boolean isOriginalRead(ObjectArrayList list, GATKSAMRecord read) {
+ return isWholeGenome() || list.get(0).equals(read);
}
/**
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReadsStash.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReadsStash.java
index 0a446bab7..52c5f0903 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReadsStash.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/ReduceReadsStash.java
@@ -46,6 +46,8 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import it.unimi.dsi.fastutil.objects.ObjectSortedSet;
+import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.sam.AlignmentStartWithNoTiesComparator;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
@@ -106,11 +108,12 @@ public class ReduceReadsStash {
/**
* sends the read to the MultiSampleCompressor
*
- * @param read the read to be compressed
+ * @param read the read to be compressed
+ * @param knownSnpPositions the set of known SNP positions
* @return any compressed reads that may have resulted from adding this read to the machinery (due to the sliding window)
*/
- public Iterable compress(GATKSAMRecord read) {
- return compressor.addAlignment(read);
+ public Iterable compress(final GATKSAMRecord read, final ObjectSortedSet knownSnpPositions) {
+ return compressor.addAlignment(read, knownSnpPositions);
}
/**
@@ -125,18 +128,19 @@ public class ReduceReadsStash {
/**
* Close the stash, processing all remaining reads in order
*
+ * @param knownSnpPositions the set of known SNP positions
* @return a list of all the reads produced by the SlidingWindow machinery)
*/
- public Iterable close() {
+ public Iterable close(final ObjectSortedSet knownSnpPositions) {
LinkedList result = new LinkedList();
// compress all the stashed reads (in order)
for (GATKSAMRecord read : outOfOrderReads)
- for (GATKSAMRecord compressedRead : compressor.addAlignment(read))
+ for (GATKSAMRecord compressedRead : compressor.addAlignment(read, knownSnpPositions))
result.add(compressedRead);
// output any remaining reads from the compressor
- for (GATKSAMRecord read : compressor.close())
+ for (GATKSAMRecord read : compressor.close(knownSnpPositions))
result.add(read);
return result;
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SingleSampleCompressor.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SingleSampleCompressor.java
index b4de1f0cb..61c34b6a0 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SingleSampleCompressor.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SingleSampleCompressor.java
@@ -46,14 +46,13 @@
package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
+import com.google.java.contract.Ensures;
+import it.unimi.dsi.fastutil.objects.*;
+import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.sam.AlignmentStartWithNoTiesComparator;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
-import java.util.Collections;
-import java.util.Set;
-import java.util.TreeSet;
-
/**
*
* @author carneiro, depristo
@@ -63,38 +62,45 @@ public class SingleSampleCompressor {
final private int contextSize;
final private int downsampleCoverage;
final private int minMappingQuality;
+ final private double minAltPValueToTriggerVariant;
final private double minAltProportionToTriggerVariant;
final private double minIndelProportionToTriggerVariant;
final private int minBaseQual;
final private ReduceReads.DownsampleStrategy downsampleStrategy;
- final private boolean allowPolyploidReduction;
private SlidingWindow slidingWindow;
private int slidingWindowCounter;
- public static Pair, CompressionStash> emptyPair = new Pair,CompressionStash>(new TreeSet(), new CompressionStash());
+ public static Pair, CompressionStash> emptyPair = new Pair,CompressionStash>(new ObjectAVLTreeSet(), new CompressionStash());
public SingleSampleCompressor(final int contextSize,
final int downsampleCoverage,
final int minMappingQuality,
+ final double minAltPValueToTriggerVariant,
final double minAltProportionToTriggerVariant,
final double minIndelProportionToTriggerVariant,
final int minBaseQual,
- final ReduceReads.DownsampleStrategy downsampleStrategy,
- final boolean allowPolyploidReduction) {
+ final ReduceReads.DownsampleStrategy downsampleStrategy) {
this.contextSize = contextSize;
this.downsampleCoverage = downsampleCoverage;
this.minMappingQuality = minMappingQuality;
this.slidingWindowCounter = 0;
+ this.minAltPValueToTriggerVariant = minAltPValueToTriggerVariant;
this.minAltProportionToTriggerVariant = minAltProportionToTriggerVariant;
this.minIndelProportionToTriggerVariant = minIndelProportionToTriggerVariant;
this.minBaseQual = minBaseQual;
this.downsampleStrategy = downsampleStrategy;
- this.allowPolyploidReduction = allowPolyploidReduction;
}
- public Pair, CompressionStash> addAlignment( GATKSAMRecord read ) {
- Set reads = new TreeSet(new AlignmentStartWithNoTiesComparator());
+ /**
+ * Add an alignment to the compressor
+ *
+ * @param read the read to be added
+ * @param knownSnpPositions the set of known SNP positions
+ * @return any compressed reads that may have resulted from adding this read to the machinery (due to the sliding window)
+ */
+ public Pair, CompressionStash> addAlignment( final GATKSAMRecord read, final ObjectSortedSet knownSnpPositions ) {
+ ObjectSet reads = new ObjectAVLTreeSet(new AlignmentStartWithNoTiesComparator());
CompressionStash stash = new CompressionStash();
int readOriginalStart = read.getUnclippedStart();
@@ -104,27 +110,43 @@ public class SingleSampleCompressor {
(readOriginalStart - contextSize > slidingWindow.getStopLocation()))) { // this read is too far away from the end of the current sliding window
// close the current sliding window
- Pair, CompressionStash> readsAndStash = slidingWindow.close();
+ Pair, CompressionStash> readsAndStash = slidingWindow.close(knownSnpPositions);
reads = readsAndStash.getFirst();
stash = readsAndStash.getSecond();
slidingWindow = null; // so we create a new one on the next if
}
if ( slidingWindow == null) { // this is the first read
- slidingWindow = new SlidingWindow(read.getReferenceName(), read.getReferenceIndex(), contextSize, read.getHeader(), read.getReadGroup(), slidingWindowCounter, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant, minBaseQual, minMappingQuality, downsampleCoverage, downsampleStrategy, read.hasBaseIndelQualities(), allowPolyploidReduction);
+ slidingWindow = new SlidingWindow(read.getReferenceName(), read.getReferenceIndex(), contextSize, read.getHeader(), read.getReadGroup(),
+ slidingWindowCounter, minAltPValueToTriggerVariant, minAltProportionToTriggerVariant, minIndelProportionToTriggerVariant,
+ minBaseQual, minMappingQuality, downsampleCoverage, downsampleStrategy, read.hasBaseIndelQualities());
slidingWindowCounter++;
}
stash.addAll(slidingWindow.addRead(read));
- return new Pair, CompressionStash>(reads, stash);
+ return new Pair, CompressionStash>(reads, stash);
}
- public Pair, CompressionStash> close() {
- return (slidingWindow != null) ? slidingWindow.close() : emptyPair;
+ /**
+ * Properly closes the compressor.
+ *
+ * @param knownSnpPositions the set of known SNP positions
+     * @return a non-null pair of the reads generated and the compression stash
+ */
+ @Ensures("result != null")
+ public Pair, CompressionStash> close(final ObjectSortedSet knownSnpPositions) {
+ return (slidingWindow != null) ? slidingWindow.close(knownSnpPositions) : emptyPair;
}
- public Set closeVariantRegions(CompressionStash regions) {
- return slidingWindow == null ? Collections.emptySet() : slidingWindow.closeVariantRegions(regions);
+ /**
+ * Finalizes current variant regions.
+ *
+ * @param knownSnpPositions the set of known SNP positions
+     * @return A non-null set of all reads generated
+ */
+ @Ensures("result != null")
+ public ObjectSet closeVariantRegions(final CompressionStash regions, final ObjectSortedSet knownSnpPositions) {
+ return slidingWindow == null ? ObjectSets.EMPTY_SET : slidingWindow.closeVariantRegions(regions, knownSnpPositions);
}
}
diff --git a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SlidingWindow.java b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SlidingWindow.java
index 680489042..d3ca037be 100644
--- a/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SlidingWindow.java
+++ b/protected/java/src/org/broadinstitute/sting/gatk/walkers/compression/reducereads/SlidingWindow.java
@@ -48,15 +48,18 @@ package org.broadinstitute.sting.gatk.walkers.compression.reducereads;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
-import net.sf.samtools.Cigar;
+import it.unimi.dsi.fastutil.bytes.Byte2IntArrayMap;
+import it.unimi.dsi.fastutil.bytes.Byte2IntMap;
+import it.unimi.dsi.fastutil.objects.*;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.SAMFileHeader;
import org.broadinstitute.sting.gatk.downsampling.ReservoirDownsampler;
+import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.GenomeLoc;
+import org.broadinstitute.sting.utils.UnvalidatingGenomeLoc;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
-import org.broadinstitute.sting.utils.recalibration.EventType;
import org.broadinstitute.sting.utils.sam.AlignmentStartWithNoTiesComparator;
import org.broadinstitute.sting.utils.sam.GATKSAMReadGroupRecord;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
@@ -64,6 +67,7 @@ import org.broadinstitute.sting.utils.sam.ReadUtils;
import java.util.*;
+
/**
* Created by IntelliJ IDEA.
* User: roger
@@ -73,8 +77,8 @@ import java.util.*;
public class SlidingWindow {
// Sliding Window data
- final private TreeSet readsInWindow;
- final private LinkedList windowHeader;
+ final protected PriorityQueue readsInWindow;
+ final protected LinkedList windowHeader;
protected int contextSize; // the largest context size (between mismatches and indels)
protected String contig;
protected int contigIndex;
@@ -92,9 +96,9 @@ public class SlidingWindow {
protected int filteredDataConsensusCounter;
protected String filteredDataReadName;
-
// Additional parameters
- protected double MIN_ALT_BASE_PROPORTION_TO_TRIGGER_VARIANT; // proportion has to be greater than this value to trigger variant region due to mismatches
+ protected double MIN_ALT_PVALUE_TO_TRIGGER_VARIANT; // pvalue has to be greater than this value to trigger variant region due to mismatches
+ protected double MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT; // proportion has to be greater than this value to trigger variant region due to mismatches
protected double MIN_INDEL_BASE_PROPORTION_TO_TRIGGER_VARIANT; // proportion has to be greater than this value to trigger variant region due to deletions
protected int MIN_BASE_QUAL_TO_COUNT; // qual has to be greater than or equal to this value
protected int MIN_MAPPING_QUALITY;
@@ -102,8 +106,6 @@ public class SlidingWindow {
protected ReduceReads.DownsampleStrategy downsampleStrategy;
private boolean hasIndelQualities;
- private boolean allowPolyploidReductionInGeneral;
-
private static CompressionStash emptyRegions = new CompressionStash();
/**
@@ -119,8 +121,8 @@ public class SlidingWindow {
return getStopLocation(windowHeader);
}
- private int getStopLocation(LinkedList header) {
- return getStartLocation(header) + header.size() - 1;
+ private int getStopLocation(final LinkedList header) {
+ return header.isEmpty() ? -1 : header.peekLast().getLocation();
}
public String getContig() {
@@ -131,7 +133,7 @@ public class SlidingWindow {
return contigIndex;
}
- public int getStartLocation(LinkedList header) {
+ public int getStartLocation(final LinkedList header) {
return header.isEmpty() ? -1 : header.peek().getLocation();
}
@@ -144,24 +146,33 @@ public class SlidingWindow {
this.windowHeader = new LinkedList();
windowHeader.addFirst(new HeaderElement(startLocation));
- this.readsInWindow = new TreeSet();
+ this.readsInWindow = new PriorityQueue(100, new Comparator() {
+ @Override
+ public int compare(GATKSAMRecord read1, GATKSAMRecord read2) {
+ return read1.getSoftEnd() - read2.getSoftEnd();
+ }
+ });
}
- public SlidingWindow(String contig, int contigIndex, int contextSize, SAMFileHeader samHeader, GATKSAMReadGroupRecord readGroupAttribute, int windowNumber, final double minAltProportionToTriggerVariant, final double minIndelProportionToTriggerVariant, int minBaseQual, int minMappingQuality, int downsampleCoverage, final ReduceReads.DownsampleStrategy downsampleStrategy, boolean hasIndelQualities, boolean allowPolyploidReduction) {
+ public SlidingWindow(final String contig, final int contigIndex, final int contextSize, final SAMFileHeader samHeader,
+ final GATKSAMReadGroupRecord readGroupAttribute, final int windowNumber,
+ final double minAltPValueToTriggerVariant, final double minAltProportionToTriggerVariant, final double minIndelProportionToTriggerVariant,
+ final int minBaseQual, final int minMappingQuality, final int downsampleCoverage,
+ final ReduceReads.DownsampleStrategy downsampleStrategy, final boolean hasIndelQualities) {
this.contextSize = contextSize;
this.downsampleCoverage = downsampleCoverage;
- this.MIN_ALT_BASE_PROPORTION_TO_TRIGGER_VARIANT = minAltProportionToTriggerVariant;
+ this.MIN_ALT_PVALUE_TO_TRIGGER_VARIANT = minAltPValueToTriggerVariant;
+ this.MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT = minAltProportionToTriggerVariant;
this.MIN_INDEL_BASE_PROPORTION_TO_TRIGGER_VARIANT = minIndelProportionToTriggerVariant;
this.MIN_BASE_QUAL_TO_COUNT = minBaseQual;
this.MIN_MAPPING_QUALITY = minMappingQuality;
this.windowHeader = new LinkedList();
- this.readsInWindow = new TreeSet(new Comparator() {
+ this.readsInWindow = new PriorityQueue(1000, new Comparator() {
@Override
public int compare(GATKSAMRecord read1, GATKSAMRecord read2) {
- final int difference = read1.getSoftEnd() - read2.getSoftEnd();
- return difference != 0 ? difference : read1.getReadName().compareTo(read2.getReadName());
+ return read1.getSoftEnd() - read2.getSoftEnd();
}
});
@@ -181,8 +192,6 @@ public class SlidingWindow {
this.downsampleStrategy = downsampleStrategy;
this.hasIndelQualities = hasIndelQualities;
-
- this.allowPolyploidReductionInGeneral = allowPolyploidReduction;
}
/**
@@ -286,8 +295,8 @@ public class SlidingWindow {
regions = findVariantRegions(0, breakpoint, markedSites.getVariantSiteBitSet(), !forceClose);
}
- while (!readsInWindow.isEmpty() && readsInWindow.first().getSoftEnd() < windowHeaderStartLocation) {
- readsInWindow.pollFirst();
+ while (!readsInWindow.isEmpty() && readsInWindow.peek().getSoftEnd() < windowHeaderStartLocation) {
+ readsInWindow.poll();
}
return regions;
@@ -340,10 +349,16 @@ public class SlidingWindow {
private final MarkedSites markedSites = new MarkedSites();
/**
- * returns an array marked with variant and non-variant regions (it uses
- * markVariantRegions to make the marks)
+ * returns the MarkedSites object so that it can be tested after adding data to the Sliding Window
*
- * @param stop check the window from start to stop (not-inclusive)
+ * @return the Marked Sites object used by this Sliding Window
+ */
+ protected MarkedSites getMarkedSitesForTesting() { return markedSites; }
+
+ /**
+ * returns an array marked with variant and non-variant regions (it uses markVariantRegion to make the marks)
+ *
+ * @param stop check the window from start to stop (not-inclusive); given in global coordinates
*/
protected void markSites(final int stop) {
@@ -353,22 +368,17 @@ public class SlidingWindow {
// copy over as many bits as we can from the previous calculation. Note that we can't trust the
// last (contextSize - 1) worth of bits because we may not have actually looked at variant regions there.
final int lastPositionMarked = markedSites.updateRegion(windowHeaderStartLocation, sizeOfMarkedRegion) - contextSize - 1;
- final int locationToProcess = Math.min(lastPositionMarked, stop - contextSize);
+ final int locationToProcess = Math.max(windowHeaderStartLocation, Math.min(lastPositionMarked, stop - contextSize));
- // update the iterator to the correct position
- Iterator headerElementIterator = windowHeader.iterator();
- for (int i = windowHeaderStartLocation; i < locationToProcess; i++) {
- if (headerElementIterator.hasNext())
- headerElementIterator.next();
- }
+ final ListIterator headerElementIterator = windowHeader.listIterator(locationToProcess - windowHeaderStartLocation);
// process a contextSize worth of region from scratch in case there's a variant there
for (int i = locationToProcess; i < stop; i++) {
if (headerElementIterator.hasNext()) {
HeaderElement headerElement = headerElementIterator.next();
- if (headerElement.isVariant(MIN_ALT_BASE_PROPORTION_TO_TRIGGER_VARIANT, MIN_INDEL_BASE_PROPORTION_TO_TRIGGER_VARIANT))
- markVariantRegion(markedSites, i - windowHeaderStartLocation);
+ if (headerElement.isVariant(MIN_ALT_PVALUE_TO_TRIGGER_VARIANT, MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT, MIN_INDEL_BASE_PROPORTION_TO_TRIGGER_VARIANT))
+ markVariantRegion(i - windowHeaderStartLocation);
} else
break;
@@ -378,33 +388,44 @@ public class SlidingWindow {
/**
* Marks the sites around the variant site (as true)
*
- * @param markedSites the boolean array to bear the marks
* @param variantSiteLocation the location where a variant site was found
*/
- protected void markVariantRegion(final MarkedSites markedSites, final int variantSiteLocation) {
+ protected void markVariantRegion(final int variantSiteLocation) {
int from = (variantSiteLocation < contextSize) ? 0 : variantSiteLocation - contextSize;
- int to = (variantSiteLocation + contextSize + 1 > markedSites.getVariantSiteBitSet().length) ? markedSites.getVariantSiteBitSet().length : variantSiteLocation + contextSize + 1;
- for (int i = from; i < to; i++)
- markedSites.getVariantSiteBitSet()[i] = true;
+ int to = (variantSiteLocation + contextSize + 1 > markedSites.getVariantSiteBitSet().length) ? markedSites.getVariantSiteBitSet().length - 1 : variantSiteLocation + contextSize;
+ markRegionAs(from, to, true);
}
/**
- * Adds bases to the running consensus or filtered data accordingly
+ * Marks the sites around the variant site (as true)
+ *
+ * @param from the start index (inclusive) to mark
+ * @param to the end index (inclusive) to mark
+ * @param isVariant mark the region with this boolean value
+ */
+ private void markRegionAs(final int from, final int to, final boolean isVariant) {
+ for (int i = from; i <= to; i++)
+ markedSites.getVariantSiteBitSet()[i] = isVariant;
+ }
+
+ /**
+ * Adds bases to the running consensus
*
* If adding a sequence with gaps, it will finalize multiple consensus reads and keep the last running consensus
*
* @param header the window header
* @param start the first header index to add to consensus
* @param end the first header index NOT TO add to consensus
- * @param isNegativeStrand should the synthetic read be represented as being on the negative strand?
+ * @param strandType the strandedness that the synthetic read should be represented as having
* @return a non-null list of consensus reads generated by this call. Empty list if no consensus was generated.
*/
@Requires({"start >= 0 && (end >= start || end == 0)"})
@Ensures("result != null")
- protected List addToSyntheticReads(LinkedList header, int start, int end, boolean isNegativeStrand) {
- LinkedList reads = new LinkedList();
- if (start < end) {
- ListIterator headerElementIterator = header.listIterator(start);
+ protected ObjectArrayList addToSyntheticReads(final LinkedList header, final int start, final int end, final SyntheticRead.StrandType strandType) {
+ final ObjectArrayList reads = new ObjectArrayList();
+
+ if ( start < end ) {
+ final ListIterator headerElementIterator = header.listIterator(start);
if (!headerElementIterator.hasNext())
throw new ReviewedStingException(String.format("Requested to add to synthetic reads a region that contains no header element at index: %d - %d / %d", start, header.size(), end));
@@ -412,37 +433,29 @@ public class SlidingWindow {
HeaderElement headerElement = headerElementIterator.next();
if (headerElement.hasConsensusData()) {
- reads.addAll(finalizeAndAdd(ConsensusType.FILTERED));
-
- int endOfConsensus = findNextNonConsensusElement(header, start, end);
- addToRunningConsensus(header, start, endOfConsensus, isNegativeStrand);
+ // find the end of the consecutive consensus data in the window
+ final int endOfConsensus = findNextNonConsensusElement(header, start, end);
if (endOfConsensus <= start)
throw new ReviewedStingException(String.format("next start is <= current start: (%d <= %d)", endOfConsensus, start));
- reads.addAll(addToSyntheticReads(header, endOfConsensus, end, isNegativeStrand));
- } else if (headerElement.hasFilteredData()) {
+ // add to running consensus and recurse
+ addToRunningConsensus(header, start, endOfConsensus, strandType);
+ reads.addAll(addToSyntheticReads(header, endOfConsensus, end, strandType));
+
+ } else {
+
+ // add any outstanding consensus data
reads.addAll(finalizeAndAdd(ConsensusType.CONSENSUS));
- int endOfFilteredData = findNextNonFilteredDataElement(header, start, end);
- reads.addAll(addToFilteredData(header, start, endOfFilteredData, isNegativeStrand));
-
- if (endOfFilteredData <= start)
- throw new ReviewedStingException(String.format("next start is <= current start: (%d <= %d)", endOfFilteredData, start));
-
- reads.addAll(addToSyntheticReads(header, endOfFilteredData, end, isNegativeStrand));
- } else if (headerElement.isEmpty()) {
- reads.addAll(finalizeAndAdd(ConsensusType.BOTH));
-
- int endOfEmptyData = findNextNonEmptyElement(header, start, end);
-
+ // find the end of the consecutive empty data in the window
+ final int endOfEmptyData = findNextConsensusElement(header, start, end);
if (endOfEmptyData <= start)
throw new ReviewedStingException(String.format("next start is <= current start: (%d <= %d)", endOfEmptyData, start));
- reads.addAll(addToSyntheticReads(header, endOfEmptyData, end, isNegativeStrand));
- } else
- throw new ReviewedStingException(String.format("Header Element %d is neither Consensus, Data or Empty. Something is wrong.", start));
-
+ // recurse out of the empty region
+ reads.addAll(addToSyntheticReads(header, endOfEmptyData, end, strandType));
+ }
}
return reads;
@@ -454,24 +467,21 @@ public class SlidingWindow {
* @param type the synthetic reads you want to close
* @return a possibly null list of GATKSAMRecords generated by finalizing the synthetic reads
*/
- private List finalizeAndAdd(ConsensusType type) {
- GATKSAMRecord read = null;
- List list = new LinkedList();
+ private ObjectArrayList finalizeAndAdd(final ConsensusType type) {
- switch (type) {
- case CONSENSUS:
- read = finalizeRunningConsensus();
- break;
- case FILTERED:
- read = finalizeFilteredDataConsensus();
- break;
- case BOTH:
- read = finalizeRunningConsensus();
- if (read != null) list.add(read);
- read = finalizeFilteredDataConsensus();
+ final ObjectArrayList list = new ObjectArrayList();
+
+ if ( type == ConsensusType.CONSENSUS || type == ConsensusType.BOTH ) {
+ final GATKSAMRecord read = finalizeRunningConsensus();
+ if ( read != null )
+ list.add(read);
+ }
+
+ if ( type == ConsensusType.FILTERED || type == ConsensusType.BOTH ) {
+ final GATKSAMRecord read = finalizeFilteredDataConsensus();
+ if ( read != null )
+ list.add(read);
}
- if (read != null)
- list.add(read);
return list;
}
@@ -479,19 +489,145 @@ public class SlidingWindow {
/**
* Looks for the next position without consensus data
*
- * @param start beginning of the filtered region
- * @param upTo limit to search for another consensus element
+ * @param header the header to check
+ * @param start beginning of the filtered region
+ * @param upTo limit to search for another consensus element
* @return next position in local coordinates (relative to the windowHeader) with consensus data; otherwise, the start position
*/
- private int findNextNonConsensusElement(LinkedList header, int start, int upTo) {
- Iterator headerElementIterator = header.listIterator(start);
+ private int findNextNonConsensusElement(final LinkedList header, final int start, final int upTo) {
+ final Iterator headerElementIterator = header.listIterator(start);
int index = start;
while (index < upTo) {
if (!headerElementIterator.hasNext())
throw new ReviewedStingException("There are no more header elements in this window");
- HeaderElement headerElement = headerElementIterator.next();
+ if (!headerElementIterator.next().hasConsensusData())
+ break;
+ index++;
+ }
+ return index;
+ }
+
+ /**
+     * Looks for the next position with consensus data
+ *
+ * @param header the header to check
+ * @param start beginning of the filtered region
+ * @param upTo limit to search for another consensus element
+ * @return next position in local coordinates (relative to the windowHeader) with consensus data; otherwise, the start position
+ */
+ private int findNextConsensusElement(final LinkedList header, final int start, final int upTo) {
+ final Iterator headerElementIterator = header.listIterator(start);
+ int index = start;
+ while (index < upTo) {
+ if (!headerElementIterator.hasNext())
+ throw new ReviewedStingException("There are no more header elements in this window");
+
+ if (headerElementIterator.next().hasConsensusData())
+ break;
+ index++;
+ }
+ return index;
+ }
+
+ /**
+     * Adds bases to the running consensus synthetic read.
+     *
+     * Different from the addToSyntheticReads method, this method assumes a contiguous sequence of consensus
+     * bases.
+ *
+ * @param header the window header
+ * @param start the first header index to add to consensus
+ * @param end the first header index NOT TO add to consensus
+ * @param strandType the strandedness that the synthetic read should be represented as having
+ */
+ @Requires({"start >= 0 && (end >= start || end == 0)"})
+ private void addToRunningConsensus(final LinkedList header, final int start, final int end, final SyntheticRead.StrandType strandType) {
+ if (runningConsensus == null)
+ runningConsensus = new SyntheticRead(samHeader, readGroupAttribute, contig, contigIndex, consensusReadName + consensusCounter++, header.get(start).getLocation(), hasIndelQualities, strandType);
+
+ final Iterator headerElementIterator = header.listIterator(start);
+
+ for (int index = start; index < end; index++) {
+ if (!headerElementIterator.hasNext())
+ throw new ReviewedStingException("Requested to create a running consensus synthetic read from " + start + " to " + end + " but " + index + " does not exist");
+
+ final HeaderElement headerElement = headerElementIterator.next();
if (!headerElement.hasConsensusData())
+ throw new ReviewedStingException("No CONSENSUS data in " + index);
+
+ genericAddBaseToConsensus(runningConsensus, headerElement.getConsensusBaseCounts());
+ }
+ }
+
+ /**
+ * Adds bases to the running filtered data accordingly
+ *
+ * If adding a sequence with gaps, it will finalize multiple consensus reads and keep the last running consensus
+ *
+ * @param header the window header
+ * @param start the first header index to add to consensus
+ * @param end the first header index NOT TO add to consensus
+ * @return a non-null list of consensus reads generated by this call. Empty list if no consensus was generated.
+ */
+ @Requires({"start >= 0 && (end >= start || end == 0)"})
+ @Ensures("result != null")
+ protected ObjectArrayList addToFilteredReads(final LinkedList header, final int start, final int end) {
+ final ObjectArrayList reads = new ObjectArrayList();
+
+ if ( start < end ) {
+ final ListIterator headerElementIterator = header.listIterator(start);
+
+ if (!headerElementIterator.hasNext())
+ throw new ReviewedStingException(String.format("Requested to add to synthetic reads a region that contains no header element at index: %d - %d / %d", start, header.size(), end));
+
+ HeaderElement headerElement = headerElementIterator.next();
+
+ if (headerElement.hasFilteredData()) {
+
+ // find the end of the consecutive filtered data in the window
+ final int endOfFiltered = findNextNonFilteredElement(header, start, end);
+ if (endOfFiltered <= start)
+ throw new ReviewedStingException(String.format("next start is <= current start: (%d <= %d)", endOfFiltered, start));
+
+ // add to running filtered consensus and recurse
+ addToFilteredData(header, start, endOfFiltered);
+ reads.addAll(addToFilteredReads(header, endOfFiltered, end));
+
+ } else {
+
+ // add any outstanding filtered data
+ reads.addAll(finalizeAndAdd(ConsensusType.FILTERED));
+
+ // find the end of the consecutive empty data in the window
+ final int endOfEmptyData = findNextFilteredElement(header, start, end);
+ if (endOfEmptyData <= start)
+ throw new ReviewedStingException(String.format("next start is <= current start: (%d <= %d)", endOfEmptyData, start));
+
+ // recurse out of the empty region
+ reads.addAll(addToFilteredReads(header, endOfEmptyData, end));
+ }
+ }
+
+ return reads;
+ }
+
+ /**
+     * Looks for the next position without filtered data
+     *
+     * @param header the header to check
+     * @param start beginning of the filtered region
+     * @param upTo limit to search for another filtered element
+     * @return next position in local coordinates (relative to the windowHeader) without filtered data; otherwise, the start position
+ */
+ private int findNextNonFilteredElement(final LinkedList header, final int start, final int upTo) {
+ final Iterator headerElementIterator = header.listIterator(start);
+ int index = start;
+ while (index < upTo) {
+ if (!headerElementIterator.hasNext())
+ throw new ReviewedStingException("There are no more header elements in this window");
+
+ if (!headerElementIterator.next().hasFilteredData())
break;
index++;
}
@@ -499,43 +635,21 @@ public class SlidingWindow {
}
/**
- * Looks for the next position without filtered data
+     * Looks for the next position with filtered data
*
- * @param start beginning of the region
- * @param upTo limit to search for
- * @return next position in local coordinates (relative to the windowHeader) with no filtered data; otherwise, the start position
+ * @param header the header to check
+ * @param start beginning of the filtered region
+     * @param upTo limit to search for another filtered element
+     * @return next position in local coordinates (relative to the windowHeader) with filtered data; otherwise, the start position
*/
- private int findNextNonFilteredDataElement(LinkedList header, int start, int upTo) {
- Iterator headerElementIterator = header.listIterator(start);
+ private int findNextFilteredElement(final LinkedList header, final int start, final int upTo) {
+ final Iterator headerElementIterator = header.listIterator(start);
int index = start;
while (index < upTo) {
if (!headerElementIterator.hasNext())
throw new ReviewedStingException("There are no more header elements in this window");
- HeaderElement headerElement = headerElementIterator.next();
- if (!headerElement.hasFilteredData() || headerElement.hasConsensusData())
- break;
- index++;
- }
- return index;
- }
-
- /**
- * Looks for the next non-empty header element
- *
- * @param start beginning of the region
- * @param upTo limit to search for
- * @return next position in local coordinates (relative to the windowHeader) with non-empty element; otherwise, the start position
- */
- private int findNextNonEmptyElement(LinkedList header, int start, int upTo) {
- ListIterator headerElementIterator = header.listIterator(start);
- int index = start;
- while (index < upTo) {
- if (!headerElementIterator.hasNext())
- throw new ReviewedStingException("There are no more header elements in this window");
-
- HeaderElement headerElement = headerElementIterator.next();
- if (!headerElement.isEmpty())
+ if (headerElementIterator.next().hasFilteredData())
break;
index++;
}
@@ -551,66 +665,25 @@ public class SlidingWindow {
* @param header the window header
* @param start the first header index to add to consensus
* @param end the first header index NOT TO add to consensus
- * @param isNegativeStrand should the synthetic read be represented as being on the negative strand?
- * @return a non-null list of GATKSAMRecords representing finalized filtered consensus data. Empty list if no consensus was generated.
*/
@Requires({"start >= 0 && (end >= start || end == 0)"})
@Ensures("result != null")
- private List addToFilteredData(LinkedList header, int start, int end, boolean isNegativeStrand) {
- List result = new ArrayList(0);
+ private void addToFilteredData(final LinkedList header, final int start, final int end) {
if (filteredDataConsensus == null)
- filteredDataConsensus = new SyntheticRead(samHeader, readGroupAttribute, contig, contigIndex, filteredDataReadName + filteredDataConsensusCounter++, header.get(start).getLocation(), GATKSAMRecord.REDUCED_READ_CONSENSUS_TAG, hasIndelQualities, isNegativeStrand);
+ filteredDataConsensus = new SyntheticRead(samHeader, readGroupAttribute, contig, contigIndex, filteredDataReadName + filteredDataConsensusCounter++, header.get(start).getLocation(), hasIndelQualities, SyntheticRead.StrandType.STRANDLESS);
ListIterator headerElementIterator = header.listIterator(start);
for (int index = start; index < end; index++) {
if (!headerElementIterator.hasNext())
throw new ReviewedStingException("Requested to create a filtered data synthetic read from " + start + " to " + end + " but " + index + " does not exist");
- HeaderElement headerElement = headerElementIterator.next();
- if (headerElement.hasConsensusData())
- throw new ReviewedStingException("Found consensus data inside region to add to filtered data.");
+ final HeaderElement headerElement = headerElementIterator.next();
if (!headerElement.hasFilteredData())
throw new ReviewedStingException("No filtered data in " + index);
- if ( filteredDataConsensus.getRefStart() + filteredDataConsensus.size() != headerElement.getLocation() ) {
- result.add(finalizeFilteredDataConsensus());
- filteredDataConsensus = new SyntheticRead(samHeader, readGroupAttribute, contig, contigIndex, filteredDataReadName + filteredDataConsensusCounter++, headerElement.getLocation(), GATKSAMRecord.REDUCED_READ_CONSENSUS_TAG, hasIndelQualities, isNegativeStrand);
- }
-
- genericAddBaseToConsensus(filteredDataConsensus, headerElement.getFilteredBaseCounts(), headerElement.getRMS());
- }
-
- return result;
- }
-
- /**
- * Adds bases to the filtered data synthetic read.
- *
- * Different from the addToConsensus method, this method assumes a contiguous sequence of filteredData
- * bases.
- *
- * @param header the window header
- * @param start the first header index to add to consensus
- * @param end the first header index NOT TO add to consensus
- * @param isNegativeStrand should the synthetic read be represented as being on the negative strand?
- */
- @Requires({"start >= 0 && (end >= start || end == 0)"})
- private void addToRunningConsensus(LinkedList header, int start, int end, boolean isNegativeStrand) {
- if (runningConsensus == null)
- runningConsensus = new SyntheticRead(samHeader, readGroupAttribute, contig, contigIndex, consensusReadName + consensusCounter++, header.get(start).getLocation(), GATKSAMRecord.REDUCED_READ_CONSENSUS_TAG, hasIndelQualities, isNegativeStrand);
-
- Iterator headerElementIterator = header.listIterator(start);
- for (int index = start; index < end; index++) {
- if (!headerElementIterator.hasNext())
- throw new ReviewedStingException("Requested to create a running consensus synthetic read from " + start + " to " + end + " but " + index + " does not exist");
-
- HeaderElement headerElement = headerElementIterator.next();
- if (!headerElement.hasConsensusData())
- throw new ReviewedStingException("No CONSENSUS data in " + index);
-
- genericAddBaseToConsensus(runningConsensus, headerElement.getConsensusBaseCounts(), headerElement.getRMS());
+ genericAddBaseToConsensus(filteredDataConsensus, headerElement.getFilteredBaseCounts());
}
}
@@ -619,15 +692,14 @@ public class SlidingWindow {
*
* @param syntheticRead the synthetic read to add to
* @param baseCounts the base counts object in the header element
- * @param rms the rms mapping quality in the header element
*/
- private void genericAddBaseToConsensus(SyntheticRead syntheticRead, BaseAndQualsCounts baseCounts, double rms) {
+ private void genericAddBaseToConsensus(final SyntheticRead syntheticRead, final BaseAndQualsCounts baseCounts) {
final BaseIndex base = baseCounts.baseIndexWithMostProbability();
byte count = (byte) Math.min(baseCounts.countOfBase(base), Byte.MAX_VALUE);
byte qual = baseCounts.averageQualsOfBase(base);
byte insQual = baseCounts.averageInsertionQualsOfBase(base);
byte delQual = baseCounts.averageDeletionQualsOfBase(base);
- syntheticRead.add(base, count, qual, insQual, delQual, rms);
+ syntheticRead.add(base, count, qual, insQual, delQual, baseCounts.getRMS());
}
/**
@@ -635,117 +707,219 @@ public class SlidingWindow {
*
* @param start the first window header index in the variant region (inclusive)
* @param stop the last window header index of the variant region (inclusive)
- * @param disallowPolyploidReductionAtThisPosition should we disallow polyploid (het) compression here?
- * @return a non-null list of all reads contained in the variant region
+ * @param knownSnpPositions the set of known SNPs used to determine whether to allow polyploid consensus creation here; can be null (to allow polyploid consensus anywhere)
+ * @return a non-null object representing all reads contained in the variant region
*/
@Requires({"start >= 0 && (stop >= start || stop == 0)"})
@Ensures("result != null")
- protected List compressVariantRegion(final int start, final int stop, final boolean disallowPolyploidReductionAtThisPosition) {
- List allReads = new LinkedList();
+ protected CloseVariantRegionResult compressVariantRegion(final int start, final int stop, final ObjectSortedSet knownSnpPositions) {
+ final CloseVariantRegionResult allReads = new CloseVariantRegionResult(stop);
// Try to compress into a polyploid consensus
- int nVariantPositions = 0;
- int hetRefPosition = -1;
- boolean canCompress = true;
- Object[] header = windowHeader.toArray();
+ // Optimization: don't bother if there are no known SNPs here
+ final int hetRefPosition = (knownSnpPositions != null && knownSnpPositions.isEmpty()) ? -1 : findSinglePolyploidCompressiblePosition(start, stop);
- // foundEvent will remain false if we don't allow polyploid reduction
- if ( allowPolyploidReductionInGeneral && !disallowPolyploidReductionAtThisPosition ) {
- for (int i = start; i<=stop; i++) {
-
- int nAlleles = ((HeaderElement) header[i]).getNumberOfAlleles(MIN_ALT_BASE_PROPORTION_TO_TRIGGER_VARIANT);
-
- // we will only work on diploid cases because we just don't want to handle/test other scenarios
- if ( nAlleles > 2 ) {
- canCompress = false;
- break;
- } else if ( nAlleles == 2 ) {
- nVariantPositions++;
-
- // make sure that there is only 1 site in the variant region that contains more than one allele
- if ( nVariantPositions == 1 ) {
- hetRefPosition = i;
- } else if ( nVariantPositions > 1 ) {
- canCompress = false;
- break;
- }
- }
- }
+ // Note that using the hetRefPosition protects us from trying to compress variant regions that are created by
+ // insertions (which we don't want because we can't confirm that they represent the same allele).
+ // Also, we only allow polyploid consensus creation at known sites if provided.
+ if ( hetRefPosition != -1 && matchesKnownPosition(windowHeader.get(hetRefPosition).getLocation(), knownSnpPositions) ) {
+ // try to create the polyploid consensus
+ allReads.reads.addAll(createPolyploidConsensus(hetRefPosition));
+ allReads.stopPerformed = hetRefPosition; // we stopped at the het position
}
-
- // Try to compress the variant region; note that using the hetRefPosition protects us from trying to compress
- // variant regions that are created by insertions (since we can't confirm here that they represent the same allele)
- if ( canCompress && hetRefPosition != -1 ) {
- allReads = createPolyploidConsensus(start, stop, ((HeaderElement) header[hetRefPosition]).getLocation());
- }
-
- // Return all reads that overlap the variant region and remove them from the window header entirely
- // also remove all reads preceding the variant region (since they will be output as consensus right after compression
+ // if we can't create a polyploid consensus here, return all reads that overlap the variant region and remove them
+ // from the window header entirely; also remove all reads preceding the variant region (since they will be output
+ // as consensus right after compression)
else {
final int refStart = windowHeader.get(start).getLocation();
final int refStop = windowHeader.get(stop).getLocation();
- LinkedList toRemove = new LinkedList();
- for (GATKSAMRecord read : readsInWindow) {
- if (read.getSoftStart() <= refStop) {
- if (read.getAlignmentEnd() >= refStart) {
- allReads.add(read);
+ final ObjectList toRemove = new ObjectArrayList();
+ for ( final GATKSAMRecord read : readsInWindow ) {
+ if ( read.getSoftStart() <= refStop ) {
+ if ( read.getAlignmentEnd() >= refStart ) {
+ allReads.reads.add(read);
removeFromHeader(windowHeader, read);
}
toRemove.add(read);
}
}
- removeReadsFromWindow(toRemove);
+
+ // remove all used reads
+ for ( final GATKSAMRecord read : toRemove )
+ readsInWindow.remove(read);
}
+
return allReads;
}
+ /**
+ * Determines whether the given position matches one of the known sites
+ *
+ * @param targetPosition the position of the het site
+ * @param knownSnpPositions the set of known SNPs used to determine whether to allow polyploid consensus creation here; can be null (to allow polyploid consensus anywhere)
+ * @return true if knownSnpPositions is null or the targetPosition matches a known SNP position, false otherwise
+ */
+ @Requires({"targetPosition >= 1 && knownSnpPositions != null"})
+ protected boolean matchesKnownPosition(final int targetPosition, final ObjectSortedSet knownSnpPositions) {
+ final GenomeLoc targetLoc = new UnvalidatingGenomeLoc(contig, contigIndex, targetPosition, targetPosition);
+ return knownSnpPositions == null || knownSnpPositions.contains(targetLoc);
+ }
+
+ /*
+ * Finds the het variant position located within start and stop (inclusive) if one exists.
+ *
+ * @param start the first header index in the region to check (inclusive)
+ * @param stop the last header index of the region to check (inclusive)
+ * @return the window header index of the single het position or -1 if either none or more than one exists
+ */
+ @Requires("start >= 0 && (stop >= start || stop == 0)")
+ protected int findSinglePolyploidCompressiblePosition(final int start, final int stop) {
+ int hetRefPosition = -1;
+
+ for ( int i = start; i <= stop; i++ ) {
+
+ final int nAlleles = windowHeader.get(i).getNumberOfBaseAlleles(MIN_ALT_PVALUE_TO_TRIGGER_VARIANT, MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT);
+
+ // we will only work on diploid non-indel cases because we just don't want to handle/test other scenarios
+ if ( nAlleles > 2 || nAlleles == -1 )
+ return -1;
+
+ if ( nAlleles == 2 ) {
+
+ // make sure that there is only 1 site in the region that contains more than one allele
+ if ( hetRefPosition != -1 )
+ return -1;
+
+ hetRefPosition = i;
+ }
+ }
+
+ return hetRefPosition;
+ }
+
+ /*
+ * Checks whether there's a position in the header with a significant number of softclips or a variant.
+ *
+ * @param header the window header to examine
+ * @param positionToSkip the global position to skip in the examination (use negative number if you don't want to make use of this argument)
+ * @return true if there exists a position (other than positionToSkip) with significant softclips or more than one base allele, false otherwise
+ */
+ @Requires("header != null")
+ protected boolean hasPositionWithSignificantSoftclipsOrVariant(final List header, final int positionToSkip) {
+
+ for ( final HeaderElement headerElement : header ) {
+
+ if ( headerElement.getLocation() == positionToSkip )
+ continue;
+
+ if ( headerElement.hasSignificantSoftclips(MIN_ALT_PVALUE_TO_TRIGGER_VARIANT, MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT) ||
+ headerElement.getNumberOfBaseAlleles(MIN_ALT_PVALUE_TO_TRIGGER_VARIANT, MIN_ALT_PROPORTION_TO_TRIGGER_VARIANT) > 1 )
+ return true;
+ }
+
+ return false;
+ }
+
/**
* Finalizes a variant region, any adjacent synthetic reads.
*
* @param start the first window header index in the variant region (inclusive)
* @param stop the last window header index of the variant region (inclusive)
- * @param disallowPolyploidReductionAtThisPosition should we disallow polyploid (het) compression here?
- * @return a non-null list of all reads contained in the variant region plus any adjacent synthetic reads
+ * @param knownSnpPositions the set of known SNPs used to determine whether to allow polyploid consensus creation here; can be null (to allow polyploid consensus anywhere)
+ * @return a non-null object representing all reads contained in the variant region plus any adjacent synthetic reads
*/
@Requires({"start >= 0 && (stop >= start || stop == 0)"})
@Ensures("result != null")
- protected List closeVariantRegion(final int start, final int stop, final boolean disallowPolyploidReductionAtThisPosition) {
- List allReads = compressVariantRegion(start, stop, disallowPolyploidReductionAtThisPosition);
+ protected CloseVariantRegionResult closeVariantRegion(final int start, final int stop, final ObjectSortedSet knownSnpPositions) {
+ final CloseVariantRegionResult allReads = compressVariantRegion(start, stop, knownSnpPositions);
- List result = (downsampleCoverage > 0) ? downsampleVariantRegion(allReads) : allReads;
- result.addAll(addToSyntheticReads(windowHeader, 0, stop, false));
- result.addAll(finalizeAndAdd(ConsensusType.BOTH));
+ final CloseVariantRegionResult result = new CloseVariantRegionResult(allReads.stopPerformed);
+ result.reads.addAll(downsampleCoverage > 0 ? downsampleVariantRegion(allReads.reads) : allReads.reads);
+ result.reads.addAll(addToSyntheticReads(windowHeader, 0, allReads.stopPerformed + 1, SyntheticRead.StrandType.STRANDLESS));
+ result.reads.addAll(addToFilteredReads(windowHeader, 0, allReads.stopPerformed + 1));
+ result.reads.addAll(finalizeAndAdd(ConsensusType.BOTH));
return result; // finalized reads will be downsampled if necessary
}
- public Set closeVariantRegions(CompressionStash regions) {
- TreeSet allReads = new TreeSet(new AlignmentStartWithNoTiesComparator());
- if (!regions.isEmpty()) {
- int lastStop = -1;
- int windowHeaderStart = getStartLocation(windowHeader);
+ /*
+ * @see #closeVariantRegions(CompressionStash, ObjectSortedSet