Merge remote-tracking branch 'unstable/master'

This commit is contained in:
Eric Banks 2013-06-20 12:14:33 -04:00
commit 6977d6e2a7
276 changed files with 14827 additions and 3583 deletions

3
README.md 100644
View File

@ -0,0 +1,3 @@
gsa-unstable
============
See http://www.broadinstitute.org/gatk/

View File

@ -1031,6 +1031,7 @@
<delete dir="${staging.dir}"/>
<delete dir="${dist.dir}"/>
<delete dir="${pipelinetest.dir}"/>
<delete dir="${integration.tests.dir}"/>
</target>
<!-- Depend on this target if your target requires a clean working directory but you don't want to depend on clean directly -->
@ -1043,6 +1044,7 @@
<available file="${staging.dir}" />
<available file="${dist.dir}" />
<available file="${pipelinetest.dir}" />
<available file="${integration.tests.dir}" />
<available file="${javadoc.dir}" />
<available file="${scaladoc.dir}" />
<available file="${gatkdocs.dir}" />
@ -1078,6 +1080,7 @@
<property name="scala.public.test.sources" value="${public.dir}/scala/test"/>
<property name="scala.private.test.sources" value="${private.dir}/scala/test"/>
<property name="scala.protected.test.sources" value="${protected.dir}/scala/test"/>
<property name="integration.tests.dir" value="integrationtests" />
<property name="pipelinetest.dir" value="pipelinetests" />
<property name="report" value="${build.dir}/report"/>
<property name="iwww.report.dir" value="${user.home}/private_html/report"/>

View File

@ -47,13 +47,11 @@
package org.broadinstitute.sting.gatk.walkers.annotator;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.vcf.VCFHeaderLineType;
import org.broadinstitute.variant.vcf.VCFInfoHeaderLine;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.variant.variantcontext.Allele;
import java.util.*;
@ -71,37 +69,11 @@ public class BaseQualityRankSumTest extends RankSumTest implements StandardAnnot
public List<VCFInfoHeaderLine> getDescriptions() { return Arrays.asList(new VCFInfoHeaderLine("BaseQRankSum", 1, VCFHeaderLineType.Float, "Z-score from Wilcoxon rank sum test of Alt Vs. Ref base qualities")); }
protected void fillQualsFromPileup(final List<Allele> allAlleles, final int refLoc,
final ReadBackedPileup pileup,
final PerReadAlleleLikelihoodMap alleleLikelihoodMap,
final List<Double> refQuals, final List<Double> altQuals){
if (alleleLikelihoodMap == null) {
// use fast SNP-based version if we don't have per-read allele likelihoods
for ( final PileupElement p : pileup ) {
if ( isUsableBase(p) ) {
if ( allAlleles.get(0).equals(Allele.create(p.getBase(),true)) ) {
refQuals.add((double)p.getQual());
} else if ( allAlleles.contains(Allele.create(p.getBase()))) {
altQuals.add((double)p.getQual());
}
}
}
return;
}
for (Map<Allele,Double> el : alleleLikelihoodMap.getLikelihoodMapValues()) {
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el);
if (! a.isInformative())
continue; // read is non-informative
if (a.getMostLikelyAllele().isReference())
refQuals.add(-10.0*(double)el.get(a.getMostLikelyAllele()));
else if (allAlleles.contains(a.getMostLikelyAllele()))
altQuals.add(-10.0*(double)el.get(a.getMostLikelyAllele()));
}
protected Double getElementForRead(final GATKSAMRecord read, final int refLoc) {
    // Rank-sum element for the likelihood-based path: the base quality of the
    // read base aligned at refLoc (clipped to the end of the read if needed).
    final int readCoord = ReadUtils.getReadCoordinateForReferenceCoordinateUpToEndOfRead(read, refLoc, ReadUtils.ClippingTail.RIGHT_TAIL);
    return (double) read.getBaseQualities()[readCoord];
}
protected Double getElementForPileupElement(final PileupElement p) {
    // Rank-sum element for the pileup-based path: the base quality at this element.
    final double baseQual = p.getQual();
    return baseQual;
}
}

View File

@ -46,14 +46,11 @@
package org.broadinstitute.sting.gatk.walkers.annotator;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.variant.vcf.VCFHeaderLineType;
import org.broadinstitute.variant.vcf.VCFInfoHeaderLine;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.variant.variantcontext.Allele;
import java.util.*;
@ -74,26 +71,12 @@ public class ClippingRankSumTest extends RankSumTest {
public List<VCFInfoHeaderLine> getDescriptions() { return Arrays.asList(new VCFInfoHeaderLine("ClippingRankSum", 1, VCFHeaderLineType.Float, "Z-score From Wilcoxon rank sum test of Alt vs. Ref number of hard clipped bases")); }
protected void fillQualsFromPileup(final List<Allele> allAlleles,
final int refLoc,
final ReadBackedPileup pileup,
final PerReadAlleleLikelihoodMap likelihoodMap, final List<Double> refQuals, final List<Double> altQuals) {
// todo - only support non-pileup case for now, e.g. active-region based version
if (pileup != null || likelihoodMap == null)
return;
for (Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : likelihoodMap.getLikelihoodReadMap().entrySet()) {
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue());
if (! a.isInformative())
continue; // read is non-informative
if (a.getMostLikelyAllele().isReference())
refQuals.add((double)AlignmentUtils.getNumHardClippedBases(el.getKey()));
else if (allAlleles.contains(a.getMostLikelyAllele()))
altQuals.add((double)AlignmentUtils.getNumHardClippedBases(el.getKey()));
}
protected Double getElementForRead(final GATKSAMRecord read, final int refLoc) {
    // Rank-sum element for this test: how many bases were hard-clipped from the read.
    final int hardClippedBases = AlignmentUtils.getNumHardClippedBases(read);
    return (double) hardClippedBases;
}
protected Double getElementForPileupElement(final PileupElement p) {
    // TODO - pileup-based (non-active-region) traversal is not supported for the
    // clipping test yet; returning null excludes this element from the rank sum.
    return null;
}
}

View File

@ -66,10 +66,7 @@ import org.broadinstitute.variant.variantcontext.Genotype;
import org.broadinstitute.variant.variantcontext.GenotypeBuilder;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
/**
@ -135,20 +132,24 @@ public class DepthPerAlleleBySample extends GenotypeAnnotation implements Standa
}
private void annotateWithLikelihoods(final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap, final VariantContext vc, final GenotypeBuilder gb) {
final HashMap<Allele, Integer> alleleCounts = new HashMap<Allele, Integer>();
final Set<Allele> alleles = new HashSet<>(vc.getAlleles());
// make sure that there's a meaningful relationship between the alleles in the perReadAlleleLikelihoodMap and our VariantContext
if ( ! perReadAlleleLikelihoodMap.getAllelesSet().containsAll(alleles) )
throw new IllegalStateException("VC alleles " + alleles + " not a strict subset of per read allele map alleles " + perReadAlleleLikelihoodMap.getAllelesSet());
final HashMap<Allele, Integer> alleleCounts = new HashMap<>();
for ( final Allele allele : vc.getAlleles() ) { alleleCounts.put(allele, 0); }
for ( final Allele allele : vc.getAlleles() ) {
alleleCounts.put(allele, 0);
}
for (Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : perReadAlleleLikelihoodMap.getLikelihoodReadMap().entrySet()) {
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue(), alleles);
if (! a.isInformative() ) continue; // read is non-informative
final GATKSAMRecord read = el.getKey();
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue());
if (! a.isInformative() )
continue; // read is non-informative
if (!vc.getAlleles().contains(a.getMostLikelyAllele()))
continue; // sanity check - shouldn't be needed
alleleCounts.put(a.getMostLikelyAllele(), alleleCounts.get(a.getMostLikelyAllele()) + (read.isReducedRead() ? read.getReducedCount(ReadUtils.getReadCoordinateForReferenceCoordinateUpToEndOfRead(read, vc.getStart(), ReadUtils.ClippingTail.RIGHT_TAIL)) : 1));
final int prevCount = alleleCounts.get(a.getMostLikelyAllele());
final int incCount = read.isReducedRead() ? read.getReducedCount(ReadUtils.getReadCoordinateForReferenceCoordinateUpToEndOfRead(read, vc.getStart(), ReadUtils.ClippingTail.RIGHT_TAIL)) : 1;
alleleCounts.put(a.getMostLikelyAllele(), prevCount + incCount);
}
final int[] counts = new int[alleleCounts.size()];
counts[0] = alleleCounts.get(vc.getReference());
for (int i = 0; i < vc.getAlternateAlleles().size(); i++)

View File

@ -0,0 +1,126 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.annotator;
import org.broadinstitute.sting.gatk.contexts.AlignmentContext;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.AnnotatorCompatible;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.GenotypeAnnotation;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.Genotype;
import org.broadinstitute.variant.variantcontext.GenotypeBuilder;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.vcf.VCFConstants;
import org.broadinstitute.variant.vcf.VCFFormatHeaderLine;
import org.broadinstitute.variant.vcf.VCFStandardHeaderLines;
import java.util.*;
/**
* The depth of coverage of each allele per sample
*
* the depth for the HC is the sum of the informative alleles at this site. It's not perfect (as we cannot
* differentiate between reads that align over the event but aren't informative vs. those that aren't even
* close) but it's a pretty good proxy and it matches with the AD field (i.e., sum(AD) = DP).
*/
public class DepthPerSampleHC extends GenotypeAnnotation {
/**
 * Annotate the genotype builder with the per-sample depth (DP) computed from the
 * HaplotypeCaller's per-read allele likelihood map rather than from a pileup.
 *
 * The depth is the count of informative reads at this site (reduced reads count
 * for their representative count), so that sum(AD) == DP.
 *
 * @param tracker              reference metadata tracker (unused here)
 * @param walker               the calling walker (unused here)
 * @param ref                  the reference context (unused here)
 * @param stratifiedContext    pileup-based context; only checked for null
 * @param vc                   the variant context whose alleles define the comparison set
 * @param g                    the genotype being annotated; must be called
 * @param gb                   builder receiving the DP value
 * @param alleleLikelihoodMap  per-read allele likelihoods; required (HC-only annotation)
 * @throws IllegalStateException if no likelihood map is provided, or if the VC's
 *         alleles are not a subset of the likelihood map's alleles
 */
public void annotate(final RefMetaDataTracker tracker,
final AnnotatorCompatible walker,
final ReferenceContext ref,
final AlignmentContext stratifiedContext,
final VariantContext vc,
final Genotype g,
final GenotypeBuilder gb,
final PerReadAlleleLikelihoodMap alleleLikelihoodMap) {
// nothing to do without a called genotype or any source of read evidence
if ( g == null || !g.isCalled() || ( stratifiedContext == null && alleleLikelihoodMap == null) )
return;
if (alleleLikelihoodMap == null )
throw new IllegalStateException("DepthPerSampleHC can only be used with likelihood based annotations in the HaplotypeCaller");
// the depth for the HC is the sum of the informative alleles at this site. It's not perfect (as we cannot
// differentiate between reads that align over the event but aren't informative vs. those that aren't even
// close) but it's a pretty good proxy and it matches with the AD field (i.e., sum(AD) = DP).
int dp = 0;
if ( alleleLikelihoodMap.isEmpty() ) {
// there are no reads
// NOTE(review): gb.DP() is only set on the non-empty branch below, so an empty
// map leaves DP unset rather than emitting DP=0 — confirm this is intended.
} else {
final Set<Allele> alleles = new HashSet<>(vc.getAlleles());
// make sure that there's a meaningful relationship between the alleles in the perReadAlleleLikelihoodMap and our VariantContext
if ( ! alleleLikelihoodMap.getAllelesSet().containsAll(alleles) )
throw new IllegalStateException("VC alleles " + alleles + " not a strict subset of per read allele map alleles " + alleleLikelihoodMap.getAllelesSet());
for (Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : alleleLikelihoodMap.getLikelihoodReadMap().entrySet()) {
// restrict the most-likely-allele computation to the VC's alleles
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue(), alleles);
if ( a.isInformative() ) {
final GATKSAMRecord read = el.getKey();
// a reduced read represents multiple original reads at this position
final int incCount = read.isReducedRead() ? read.getReducedCount(ReadUtils.getReadCoordinateForReferenceCoordinateUpToEndOfRead(read, vc.getStart(), ReadUtils.ClippingTail.RIGHT_TAIL)) : 1;
dp += incCount;
}
}
gb.DP(dp);
}
}
/** @return the single VCF key this annotation writes: the standard DP key */
public List<String> getKeyNames() {
return Collections.singletonList(VCFConstants.DEPTH_KEY);
}
/** @return the standard FORMAT header line for the DP key */
public List<VCFFormatHeaderLine> getDescriptions() {
return Collections.singletonList(VCFStandardHeaderLines.getFormatLine(getKeyNames().get(0)));
}
}

View File

@ -300,7 +300,7 @@ public class FisherStrand extends InfoFieldAnnotation implements StandardAnnotat
for ( Map.Entry<String, AlignmentContext> sample : stratifiedContexts.entrySet() ) {
for (PileupElement p : sample.getValue().getBasePileup()) {
if ( ! RankSumTest.isUsableBase(p, false) ) // ignore deletions
if ( ! isUsableBase(p) ) // ignore deletions and bad MQ
continue;
if ( p.getQual() < minQScoreToConsider || p.getMappingQual() < minQScoreToConsider )
@ -313,6 +313,20 @@ public class FisherStrand extends InfoFieldAnnotation implements StandardAnnotat
return table;
}
/**
* Can the base in this pileup element be used in comparative tests?
*
* @param p the pileup element to consider
*
* @return true if this base is part of a meaningful read for comparison, false otherwise
*/
/**
 * Can the base in this pileup element be used in comparative tests?
 *
 * @param p the pileup element to consider
 * @return true if this base is part of a meaningful read for comparison, false otherwise
 */
private static boolean isUsableBase(final PileupElement p) {
    if ( p.isDeletion() )
        return false;
    final int mappingQual = p.getMappingQual();
    if ( mappingQual == 0 || mappingQual == QualityUtils.MAPPING_QUALITY_UNAVAILABLE )
        return false;
    return ((int) p.getQual()) >= QualityUtils.MIN_USABLE_Q_SCORE;
}
private static void updateTable(final int[][] table, final Allele allele, final GATKSAMRecord read, final Allele ref, final Allele alt, final int representativeCount) {
final boolean matchesRef = allele.equals(ref, true);

View File

@ -47,14 +47,10 @@
package org.broadinstitute.sting.gatk.walkers.annotator;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.variant.vcf.VCFHeaderLineType;
import org.broadinstitute.variant.vcf.VCFInfoHeaderLine;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.variant.variantcontext.Allele;
import java.util.*;
@ -73,35 +69,11 @@ public class MappingQualityRankSumTest extends RankSumTest implements StandardAn
public List<VCFInfoHeaderLine> getDescriptions() { return Arrays.asList(new VCFInfoHeaderLine("MQRankSum", 1, VCFHeaderLineType.Float, "Z-score From Wilcoxon rank sum test of Alt vs. Ref read mapping qualities")); }
protected void fillQualsFromPileup(final List<Allele> allAlleles,
final int refLoc,
final ReadBackedPileup pileup,
final PerReadAlleleLikelihoodMap likelihoodMap,
final List<Double> refQuals, final List<Double> altQuals) {
if (pileup != null && likelihoodMap == null) {
// old UG snp-only path through the annotations
for ( final PileupElement p : pileup ) {
if ( isUsableBase(p) ) {
if ( allAlleles.get(0).equals(Allele.create(p.getBase(), true)) ) {
refQuals.add((double)p.getMappingQual());
} else if ( allAlleles.contains(Allele.create(p.getBase()))) {
altQuals.add((double)p.getMappingQual());
}
}
}
return;
}
for (Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : likelihoodMap.getLikelihoodReadMap().entrySet()) {
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue());
// BUGBUG: There needs to be a comparable isUsableBase check here
if (! a.isInformative())
continue; // read is non-informative
if (a.getMostLikelyAllele().isReference())
refQuals.add((double)el.getKey().getMappingQuality());
else if (allAlleles.contains(a.getMostLikelyAllele()))
altQuals.add((double)el.getKey().getMappingQuality());
}
protected Double getElementForRead(final GATKSAMRecord read, final int refLoc) {
return (double)read.getMappingQuality();
}
}
protected Double getElementForPileupElement(final PileupElement p) {
return (double)p.getRead().getMappingQuality();
}
}

View File

@ -56,6 +56,7 @@ import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnota
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.vcf.VCFConstants;
import org.broadinstitute.variant.vcf.VCFInfoHeaderLine;
import org.broadinstitute.variant.vcf.VCFStandardHeaderLines;
@ -77,55 +78,41 @@ public class RMSMappingQuality extends InfoFieldAnnotation implements StandardAn
final Map<String, AlignmentContext> stratifiedContexts,
final VariantContext vc,
final Map<String, PerReadAlleleLikelihoodMap> perReadAlleleLikelihoodMap ) {
int totalSize = 0, index = 0;
int qualities[];
if (stratifiedContexts != null) {
final List<Integer> qualities = new ArrayList<>();
if ( stratifiedContexts != null ) {
if ( stratifiedContexts.size() == 0 )
return null;
for ( AlignmentContext context : stratifiedContexts.values() )
totalSize += context.size();
qualities = new int[totalSize];
for ( Map.Entry<String, AlignmentContext> sample : stratifiedContexts.entrySet() ) {
AlignmentContext context = sample.getValue();
for (PileupElement p : context.getBasePileup() )
index = fillMappingQualitiesFromPileupAndUpdateIndex(p.getRead(), index, qualities);
for ( final Map.Entry<String, AlignmentContext> sample : stratifiedContexts.entrySet() ) {
final AlignmentContext context = sample.getValue();
for ( final PileupElement p : context.getBasePileup() )
fillMappingQualitiesFromPileup(p.getRead().getMappingQuality(), p.getRepresentativeCount(), qualities);
}
}
else if (perReadAlleleLikelihoodMap != null) {
if ( perReadAlleleLikelihoodMap.size() == 0 )
return null;
for ( PerReadAlleleLikelihoodMap perReadLikelihoods : perReadAlleleLikelihoodMap.values() )
totalSize += perReadLikelihoods.size();
qualities = new int[totalSize];
for ( PerReadAlleleLikelihoodMap perReadLikelihoods : perReadAlleleLikelihoodMap.values() ) {
for (GATKSAMRecord read : perReadLikelihoods.getStoredElements())
index = fillMappingQualitiesFromPileupAndUpdateIndex(read, index, qualities);
}
for ( final PerReadAlleleLikelihoodMap perReadLikelihoods : perReadAlleleLikelihoodMap.values() ) {
for ( final GATKSAMRecord read : perReadLikelihoods.getStoredElements() )
fillMappingQualitiesFromPileup(read.getMappingQuality(), (read.isReducedRead() ? read.getReducedCount(ReadUtils.getReadCoordinateForReferenceCoordinateUpToEndOfRead(read, vc.getStart(), ReadUtils.ClippingTail.RIGHT_TAIL)) : 1), qualities);
}
}
else
return null;
double rms = MathUtils.rms(qualities);
Map<String, Object> map = new HashMap<String, Object>();
map.put(getKeyNames().get(0), String.format("%.2f", rms));
return map;
final double rms = MathUtils.rms(qualities);
return Collections.singletonMap(getKeyNames().get(0), (Object)String.format("%.2f", rms));
}
private static int fillMappingQualitiesFromPileupAndUpdateIndex(final GATKSAMRecord read, final int inputIdx, final int[] qualities) {
int outputIdx = inputIdx;
if ( read.getMappingQuality() != QualityUtils.MAPPING_QUALITY_UNAVAILABLE )
qualities[outputIdx++] = read.getMappingQuality();
return outputIdx;
/**
 * Append a read's mapping quality to the accumulator, once per represented read.
 *
 * @param mq                  the read's mapping quality
 * @param representativeCount how many original reads this entry stands for (reduced reads)
 * @param qualities           the accumulator the qualities are appended to
 */
private static void fillMappingQualitiesFromPileup(final int mq, final int representativeCount, final List<Integer> qualities) {
    // skip reads whose mapping quality carries the project's "unavailable" sentinel
    if ( mq == QualityUtils.MAPPING_QUALITY_UNAVAILABLE )
        return;
    // nCopies(1, mq) is a one-element list, so no special case is needed for count == 1
    qualities.addAll(Collections.nCopies(representativeCount, mq));
}
public List<String> getKeyNames() { return Arrays.asList(VCFConstants.RMS_MAPPING_QUALITY_KEY); }

View File

@ -53,9 +53,11 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.ActiveRegionBasedAnnotation;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.AnnotatorCompatible;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.InfoFieldAnnotation;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.MannWhitneyU;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.variant.vcf.VCFHeaderLine;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.pileup.PileupElement;
@ -87,31 +89,33 @@ public abstract class RankSumTest extends InfoFieldAnnotation implements ActiveR
if (genotypes == null || genotypes.size() == 0)
return null;
final ArrayList<Double> refQuals = new ArrayList<Double>();
final ArrayList<Double> altQuals = new ArrayList<Double>();
final ArrayList<Double> refQuals = new ArrayList<>();
final ArrayList<Double> altQuals = new ArrayList<>();
for ( final Genotype genotype : genotypes.iterateInSampleNameOrder() ) {
PerReadAlleleLikelihoodMap indelLikelihoodMap = null;
ReadBackedPileup pileup = null;
boolean usePileup = true;
if (stratifiedContexts != null) { // the old UG SNP-only path through the annotations
final AlignmentContext context = stratifiedContexts.get(genotype.getSampleName());
if ( context != null )
pileup = context.getBasePileup();
if ( stratifiedPerReadAlleleLikelihoodMap != null ) {
final PerReadAlleleLikelihoodMap likelihoodMap = stratifiedPerReadAlleleLikelihoodMap.get(genotype.getSampleName());
if ( likelihoodMap != null && !likelihoodMap.isEmpty() ) {
fillQualsFromLikelihoodMap(vc.getAlleles(), vc.getStart(), likelihoodMap, refQuals, altQuals);
usePileup = false;
}
}
if (stratifiedPerReadAlleleLikelihoodMap != null )
indelLikelihoodMap = stratifiedPerReadAlleleLikelihoodMap.get(genotype.getSampleName());
if (indelLikelihoodMap != null && indelLikelihoodMap.isEmpty())
indelLikelihoodMap = null;
// treat an empty likelihood map as a null reference - will simplify contract with fillQualsFromPileup
if (indelLikelihoodMap == null && pileup == null)
continue;
fillQualsFromPileup(vc.getAlleles(), vc.getStart(), pileup, indelLikelihoodMap, refQuals, altQuals );
// the old UG SNP-only path through the annotations
if ( usePileup && stratifiedContexts != null ) {
final AlignmentContext context = stratifiedContexts.get(genotype.getSampleName());
if ( context != null ) {
final ReadBackedPileup pileup = context.getBasePileup();
if ( pileup != null )
fillQualsFromPileup(vc.getAlleles(), pileup, refQuals, altQuals);
}
}
}
if (refQuals.isEmpty() && altQuals.isEmpty())
if ( refQuals.isEmpty() && altQuals.isEmpty() )
return null;
final MannWhitneyU mannWhitneyU = new MannWhitneyU(useDithering);
@ -136,18 +140,72 @@ public abstract class RankSumTest extends InfoFieldAnnotation implements ActiveR
// we are testing that set1 (the alt bases) have lower quality scores than set2 (the ref bases)
final Pair<Double, Double> testResults = mannWhitneyU.runOneSidedTest(MannWhitneyU.USet.SET1);
final Map<String, Object> map = new HashMap<String, Object>();
final Map<String, Object> map = new HashMap<>();
if (!Double.isNaN(testResults.first))
map.put(getKeyNames().get(0), String.format("%.3f", testResults.first));
return map;
}
protected abstract void fillQualsFromPileup(final List<Allele> alleles,
final int refLoc,
final ReadBackedPileup readBackedPileup,
final PerReadAlleleLikelihoodMap alleleLikelihoodMap,
final List<Double> refQuals,
final List<Double> altQuals);
/**
 * Fills the ref/alt quality lists from a raw pileup (the old UG SNP-only code path).
 *
 * A pileup base contributes to refQuals when it matches the reference allele
 * (alleles.get(0)) and to altQuals when it matches any of the alternate alleles;
 * unusable bases and null elements are skipped.
 *
 * @param alleles  the variant alleles; index 0 is assumed to be the reference allele
 * @param pileup   the read-backed pileup at the site
 * @param refQuals output list receiving reference-supporting values
 * @param altQuals output list receiving alternate-supporting values
 */
private void fillQualsFromPileup(final List<Allele> alleles,
                                 final ReadBackedPileup pileup,
                                 final List<Double> refQuals,
                                 final List<Double> altQuals) {
    for ( final PileupElement element : pileup ) {
        if ( !isUsableBase(element) )
            continue;
        final Double metric = getElementForPileupElement(element);
        if ( metric == null )
            continue;
        if ( alleles.get(0).equals(Allele.create(element.getBase(), true)) ) {
            refQuals.add(metric);
        } else if ( alleles.contains(Allele.create(element.getBase())) ) {
            altQuals.add(metric);
        }
    }
}
/**
 * Fills the ref/alt quality lists from per-read allele likelihoods.
 *
 * For each read, the most likely allele is determined from the likelihood map;
 * non-informative reads, unusable reads, and null element values are skipped.
 *
 * @param alleles       the variant alleles used to decide alt membership
 * @param refLoc        the reference coordinate of the variant start
 * @param likelihoodMap per-read allele likelihoods for one sample
 * @param refQuals      output list receiving reference-supporting values
 * @param altQuals      output list receiving alternate-supporting values
 */
private void fillQualsFromLikelihoodMap(final List<Allele> alleles,
                                        final int refLoc,
                                        final PerReadAlleleLikelihoodMap likelihoodMap,
                                        final List<Double> refQuals,
                                        final List<Double> altQuals) {
    for ( final Map.Entry<GATKSAMRecord, Map<Allele,Double>> entry : likelihoodMap.getLikelihoodReadMap().entrySet() ) {
        final MostLikelyAllele best = PerReadAlleleLikelihoodMap.getMostLikelyAllele(entry.getValue());
        if ( !best.isInformative() )
            continue; // read is non-informative
        final GATKSAMRecord read = entry.getKey();
        if ( !isUsableRead(read, refLoc) )
            continue;
        final Double metric = getElementForRead(read, refLoc);
        if ( metric == null )
            continue;
        if ( best.getMostLikelyAllele().isReference() ) {
            refQuals.add(metric);
        } else if ( alleles.contains(best.getMostLikelyAllele()) ) {
            altQuals.add(metric);
        }
    }
}
/**
* Get the element for the given read at the given reference position
*
* @param read the read
* @param refLoc the reference position
* @return a Double representing the element to be used in the rank sum test, or null if it should not be used
*/
protected abstract Double getElementForRead(final GATKSAMRecord read, final int refLoc);
// TODO -- until the ReadPosRankSumTest stops treating these differently, we need to have separate methods for GATKSAMRecords and PileupElements. Yuck.
/**
* Get the element for the given pileup element (the pileup-based code path)
*
* @param p the pileup element
* @return a Double representing the element to be used in the rank sum test, or null if it should not be used
*/
protected abstract Double getElementForPileupElement(final PileupElement p);
/**
* Can the base in this pileup element be used in comparative tests between ref / alt bases?
@ -157,30 +215,33 @@ public abstract class RankSumTest extends InfoFieldAnnotation implements ActiveR
* @param p the pileup element to consider
* @return true if this base is part of a meaningful read for comparison, false otherwise
*/
public static boolean isUsableBase(final PileupElement p) {
return isUsableBase(p, false);
protected boolean isUsableBase(final PileupElement p) {
return !(p.isDeletion() ||
p.getMappingQual() == 0 ||
p.getMappingQual() == QualityUtils.MAPPING_QUALITY_UNAVAILABLE ||
((int) p.getQual()) < QualityUtils.MIN_USABLE_Q_SCORE || // need the unBAQed quality score here
p.getRead().isReducedRead() );
}
/**
* Can the base in this pileup element be used in comparative tests between ref / alt bases?
* Can the read be used in comparative tests between ref / alt bases?
*
* @param p the pileup element to consider
* @param allowDeletions if true, allow p to be a deletion base
* @return true if this base is part of a meaningful read for comparison, false otherwise
* @param read the read to consider
* @param refLoc the reference location
* @return true if this read is meaningful for comparison, false otherwise
*/
public static boolean isUsableBase(final PileupElement p, final boolean allowDeletions) {
return !((! allowDeletions && p.isDeletion()) ||
p.getMappingQual() == 0 ||
p.getMappingQual() == QualityUtils.MAPPING_QUALITY_UNAVAILABLE ||
((int) p.getQual()) < QualityUtils.MIN_USABLE_Q_SCORE); // need the unBAQed quality score here
protected boolean isUsableRead(final GATKSAMRecord read, final int refLoc) {
return !( read.getMappingQuality() == 0 ||
read.getMappingQuality() == QualityUtils.MAPPING_QUALITY_UNAVAILABLE ||
read.isReducedRead() );
}
/**
* Initialize the rank sum test annotation using walker and engine information. Right now this checks to see if
* engine randomization is turned off, and if so does not dither.
* @param walker
* @param toolkit
* @param headerLines
* @param walker the walker
* @param toolkit the GATK engine
* @param headerLines the header lines
*/
public void initialize ( AnnotatorCompatible walker, GenomeAnalysisEngine toolkit, Set<VCFHeaderLine> headerLines ) {
useDithering = ! toolkit.getArguments().disableDithering;

View File

@ -51,17 +51,13 @@ import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.SAMRecord;
import org.broadinstitute.sting.gatk.walkers.annotator.interfaces.StandardAnnotation;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.gatk.walkers.indels.PairHMMIndelErrorModel;
import org.broadinstitute.variant.vcf.VCFHeaderLineType;
import org.broadinstitute.variant.vcf.VCFInfoHeaderLine;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.variantcontext.Allele;
import java.util.*;
@ -83,55 +79,34 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
return Arrays.asList(new VCFInfoHeaderLine("ReadPosRankSum", 1, VCFHeaderLineType.Float, "Z-score from Wilcoxon rank sum test of Alt vs. Ref read position bias"));
}
protected void fillQualsFromPileup(final List<Allele> allAlleles,
final int refLoc,
final ReadBackedPileup pileup,
final PerReadAlleleLikelihoodMap alleleLikelihoodMap,
final List<Double> refQuals, final List<Double> altQuals) {
protected Double getElementForRead(final GATKSAMRecord read, final int refLoc) {
final int offset = ReadUtils.getReadCoordinateForReferenceCoordinate( read.getSoftStart(), read.getCigar(), refLoc, ReadUtils.ClippingTail.RIGHT_TAIL, true );
if ( offset == ReadUtils.CLIPPING_GOAL_NOT_REACHED )
return null;
if (alleleLikelihoodMap == null) {
// use old UG SNP-based version if we don't have per-read allele likelihoods
for ( final PileupElement p : pileup ) {
if ( isUsableBase(p) && p.getRead().getCigar() != null ) {
int readPos = AlignmentUtils.calcAlignmentByteArrayOffset(p.getRead().getCigar(), p, 0, 0);
readPos = getFinalReadPosition(p.getRead(),readPos);
if ( allAlleles.get(0).equals(Allele.create(p.getBase(), true)) ) {
refQuals.add((double)readPos);
} else if ( allAlleles.contains(Allele.create(p.getBase()))) {
altQuals.add((double)readPos);
}
}
}
return;
}
for (Map.Entry<GATKSAMRecord,Map<Allele,Double>> el : alleleLikelihoodMap.getLikelihoodReadMap().entrySet()) {
final MostLikelyAllele a = PerReadAlleleLikelihoodMap.getMostLikelyAllele(el.getValue());
if (! a.isInformative() )
continue; // read is non-informative
final GATKSAMRecord read = el.getKey();
if ( read.getSoftStart() + read.getCigar().getReadLength() <= refLoc ) { // make sure the read actually covers the requested ref loc
continue;
}
final int offset = ReadUtils.getReadCoordinateForReferenceCoordinate( read.getSoftStart(), read.getCigar(), refLoc, ReadUtils.ClippingTail.RIGHT_TAIL, true );
if ( offset == ReadUtils.CLIPPING_GOAL_NOT_REACHED || read.getCigar() == null )
continue;
int readPos = AlignmentUtils.calcAlignmentByteArrayOffset( read.getCigar(), offset, false, 0, 0 );
final int numAlignedBases = AlignmentUtils.getNumAlignedBasesCountingSoftClips( read );
if (readPos > numAlignedBases / 2)
readPos = numAlignedBases - (readPos + 1);
if (a.getMostLikelyAllele().isReference())
refQuals.add((double)readPos);
else if (allAlleles.contains(a.getMostLikelyAllele()))
altQuals.add((double)readPos);
}
int readPos = AlignmentUtils.calcAlignmentByteArrayOffset( read.getCigar(), offset, false, 0, 0 );
final int numAlignedBases = AlignmentUtils.getNumAlignedBasesCountingSoftClips( read );
if (readPos > numAlignedBases / 2)
readPos = numAlignedBases - (readPos + 1);
return (double)readPos;
}
int getFinalReadPosition(GATKSAMRecord read, int initialReadPosition) {
/**
* Computes the read-position element for the given pileup element (pileup-based code path).
*
* @param p the pileup element
* @return the read position of the base, adjusted by getFinalReadPosition, as a Double
*/
protected Double getElementForPileupElement(final PileupElement p) {
    // offset of this base within the read's alignment byte array
    final int offset = AlignmentUtils.calcAlignmentByteArrayOffset(p.getRead().getCigar(), p, 0, 0);
    return (double)getFinalReadPosition(p.getRead(), offset);
}
/**
* {@inheritDoc}
*
* Additionally requires the element's read to have a non-null cigar, since the
* read-position computation in getElementForPileupElement needs it.
*/
@Override
protected boolean isUsableBase(final PileupElement p) {
    return super.isUsableBase(p) && p.getRead().getCigar() != null;
}
/**
* {@inheritDoc}
*
* Additionally requires that the read actually covers the requested reference
* location (soft start plus cigar read length must extend past refLoc).
*/
@Override
protected boolean isUsableRead(final GATKSAMRecord read, final int refLoc) {
    return super.isUsableRead(read, refLoc) && read.getSoftStart() + read.getCigar().getReadLength() > refLoc;
}
private int getFinalReadPosition(final GATKSAMRecord read, final int initialReadPosition) {
final int numAlignedBases = getNumAlignedBases(read);
int readPos = initialReadPosition;
@ -141,7 +116,8 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
return readPos;
}
int getNumClippedBasesAtStart(SAMRecord read) {
private int getNumClippedBasesAtStart(final SAMRecord read) {
// compute total number of clipped bases (soft or hard clipped)
// check for hard clips (never consider these bases):
final Cigar c = read.getCigar();
@ -151,8 +127,8 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
if (first.getOperator() == CigarOperator.H) {
numStartClippedBases = first.getLength();
}
byte[] unclippedReadBases = read.getReadBases();
byte[] unclippedReadQuals = read.getBaseQualities();
final byte[] unclippedReadBases = read.getReadBases();
final byte[] unclippedReadQuals = read.getBaseQualities();
// Do a stricter base clipping than provided by CIGAR string, since this one may be too conservative,
// and may leave a string of Q2 bases still hanging off the reads.
@ -167,11 +143,11 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
return numStartClippedBases;
}
int getNumAlignedBases(SAMRecord read) {
private int getNumAlignedBases(final GATKSAMRecord read) {
return read.getReadLength() - getNumClippedBasesAtStart(read) - getNumClippedBasesAtEnd(read);
}
int getNumClippedBasesAtEnd(SAMRecord read) {
private int getNumClippedBasesAtEnd(final GATKSAMRecord read) {
// compute total number of clipped bases (soft or hard clipped)
// check for hard clips (never consider these bases):
final Cigar c = read.getCigar();
@ -181,8 +157,8 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
if (last.getOperator() == CigarOperator.H) {
numEndClippedBases = last.getLength();
}
byte[] unclippedReadBases = read.getReadBases();
byte[] unclippedReadQuals = read.getBaseQualities();
final byte[] unclippedReadBases = read.getReadBases();
final byte[] unclippedReadQuals = read.getBaseQualities();
// Do a stricter base clipping than provided by CIGAR string, since this one may be too conservative,
// and may leave a string of Q2 bases still hanging off the reads.
@ -193,11 +169,6 @@ public class ReadPosRankSumTest extends RankSumTest implements StandardAnnotatio
break;
}
return numEndClippedBases;
}
int getOffsetFromClippedReadStart(SAMRecord read, int offset) {
return offset - getNumClippedBasesAtStart(read);
}
}

View File

@ -0,0 +1,583 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.bqsr;
import com.google.java.contract.Requires;
import org.broadinstitute.sting.commandline.Argument;
import org.broadinstitute.sting.commandline.Input;
import org.broadinstitute.sting.commandline.Output;
import org.broadinstitute.sting.gatk.contexts.AlignmentContext;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.RodWalker;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.recalibration.RecalUtils;
import org.broadinstitute.sting.utils.recalibration.RecalibrationReport;
import org.broadinstitute.sting.utils.recalibration.BaseRecalibration;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* Tool to analyze and evaluate base recalibration tables.
* <p/>
* For now it generates a plot report to assess the quality of a recalibration.
*
* <h3>Input</h3>
*
* The tool can take up to three different sets of recalibration tables.
* The resulting plots will be overlaid on top of each other to make
* comparisons easy.
*
* <table style="text-align: left">
* <thead>
* <tr><th>Set</th><th>Argument</th><th>Label</th><th>Color</th><th>Description</th></tr>
* </thead>
* <tbody>
* <tr><td>Original</td><td>-before</td><td>BEFORE</td><td style="color: #ff34b3">Maroon1</td>
* <td>First pass recalibration
* tables obtained from applying {@link BaseRecalibration}
* on the original alignment.</td></tr>
* <tr><td>Recalibrated</td><td>-after</td><td>AFTER</td><td style="color: #0000ff">Blue</td>
* <td>Second pass recalibration tables
* results from the application of {@link BaseRecalibration}
* on the alignment recalibrated using the first pass tables</td></tr>
* <tr><td>Input</td><td>-BQSR</td><td>BQSR</td><td style="color: #000000">Black</td>
* <td>Any recalibration table without a specific role</td></tr>
* </tbody>
* </table>
* <br/>
*
* You need to specify one set at least. Multiple sets need to have the same values for the following parameters:
* <br/><br/>
* <i>covariate (order is not important), no_standard_covs, run_without_dbsnp, solid_recal_mode,
* solid_nocall_strategy, mismatches_context_size, mismatches_default_quality, deletions_default_quality,
* insertions_default_quality, maximum_cycle_value, low_quality_tail, default_platform, force_platform,
* quantizing_levels</i> and <i>binary_tag_name</i>
* <h3>Output</h3>
*
* Currently this tool generates two outputs:
*
* <dl>
* <dt style="font-weight: normal">-plots <i>my-report.pdf</i></dt>
* <dd>A pdf document that encloses plots to assess the quality of the recalibration.</dd>
* <dt style="font-weight: normal">-csv <i>my-report.csv</i></dt>
* <dd>A csv file that contains a table with all the data required to generate those plots.</dd>
* </dl>
*
* You need to specify at least one of them.
*
* <h3>Other Arguments</h3>
*
* <h4>-ignoreLMT, --ignoreLastModificationTimes</h4>
*
* when set, no warning message will be displayed if the -before recalibration table file appears to be newer than the -after one.
*
* <h3>Examples</h3>
*
*
* <h4>Plot a single recalibration table</h4>
* <pre>
* java -jar GenomeAnalysisTK.jar \
* -T AnalyzeCovariates \
* -R myrefernce.fasta \
* -BQSR myrecal.table \
* -plots BQSR.pdf
* </pre>
*
* <h4>Plot before (first pass) and after (second pass) recalibration table to compare them</h4>
*
* <pre>
* java -jar GenomeAnalysisTK.jar \
* -T AnalyzeCovariates \
* -R myrefernce.fasta \
* -before recal2.table \
* -after recal3.table \
* -plots recalQC.pdf
* </pre>
*
* <h4>Plot up to three recalibration tables for comparison</h4>
*
* <pre>
*
* # You can ignore the before/after semantics completely if you like (if you do add -ignoreLMT
* # to avoid a possible warning), but all tables should have been generated using the same parameters.
*
* java -jar GenomeAnalysisTK.jar \
* -T AnalyzeCovariates \
* -R myrefernce.fasta \
* -ignoreLMT \
* -BQSR recal1.table \ # you can discard any two
* -before recal2.table \
* -after recal3.table \
* -plots myrecals.pdf
* </pre>
*
* <h4>Full BQSR quality assessment pipeline</h4>
*
* <pre>
* # Generate the first pass recalibration table file.
* java -jar GenomeAnalysisTK.jar \
* -T BaseRecalibrator \
* -R myreference.fasta \
* -I myinput.bam \
* -knownSites bundle/my-trusted-snps.vcf \ # optional but recommendable
* -knownSites bundle/my-trusted-indels.vcf \ # optional but recommendable
* ... other options
* -o firstpass.table
*
* # Generate the second pass recalibration table file.
* java -jar GenomeAnalysisTK.jar \
* -T BaseRecalibrator \
* -BQSR firstpass.table \
* -R myreference.fasta \
* -I myinput.bam \
* -knownSites bundle/my-trusted-snps.vcf \
* -knownSites bundle/my-trusted-indels.vcf \
* ... other options \
* -o secondpass.table
*
* # Finally generate the plots report and also keep a copy of the csv (optional).
* java -jar GenomeAnalysisTK.jar \
* -T AnalyzeCovariates \
* -R myrefernce.fasta \
* -before firstpass.table \
* -after secondpass.table \
* -csv BQSR.csv \ # optional
* -plots BQSR.pdf
* </pre>
*
* @author Valentin Ruano-Rubio &lt;valentin@broadinstitute.org&gt;
* @version 6/16/2013
* @since 2.6
*/
public final class AnalyzeCovariates extends RodWalker<AnalyzeCovariates.None,AnalyzeCovariates.None> {
// Constants on option short names that are used in some error/warning messages:
static final String CSV_ARG_SHORT_NAME = "csv";
static final String PDF_ARG_SHORT_NAME = "plots";
static final String BEFORE_ARG_SHORT_NAME = "before";
static final String AFTER_ARG_SHORT_NAME = "after";
/**
* File containing the recalibration tables from the first pass.
*/
@Input(shortName=BEFORE_ARG_SHORT_NAME,fullName="beforeReportFile", doc = "file containing the BQSR first-pass report file",required = false)
protected File beforeFile = null;
/**
* File containing the recalibration tables from the second pass.
*/
@Input(shortName=AFTER_ARG_SHORT_NAME, fullName="afterReportFile", doc = "file containing the BQSR second-pass report file",required = false)
protected File afterFile = null;
/**
* If true, it won't show a warning if the last-modification time of the before and after input files suggest that they have been reversed.
*/
@Argument(shortName="ignoreLMT", fullName="ignoreLastModificationTimes", doc= "do not emit warning messages related to suspicious last modification time order of inputs", required = false)
protected boolean ignoreLastModificationTime = false;
/**
* Output report file name.
*/
@Output(shortName=PDF_ARG_SHORT_NAME, fullName="plotsReportFile" ,doc = "location of the output report", required = false)
protected File pdfFile = null;
/**
* Output csv file name.
*/
@Output(shortName=CSV_ARG_SHORT_NAME,fullName="intermediateCsvFile" ,doc = "location of the csv intermediate file", required = false)
protected File csvFile = null;
/**
* Convenience reference to the RECAL_BQSR_FILE argument value.
* <p/>
* This field value is resolved by {@link #initialize()}.
*/
protected File bqsrFile = null;
/**
 * Checks inputs and argument values.
 * <p/>
 * Notice that this routine will not validate the content of files. It may have some minor side effects as
 * the output of warning messages back to the user.
 *
 * @throws IllegalStateException if there is some required argument value that has not been loaded yet.
 * @throws UserException if there is some error caused by or under the end user's control.
 */
private void checkArgumentsValues() {
    checkInputReportFile("BQSR", bqsrFile);
    checkInputReportFile("before", beforeFile);
    checkInputReportFile("after", afterFile);
    if (bqsrFile == null && beforeFile == null && afterFile == null) {
        // fixed: the message's argument list was missing its closing parenthesis
        throw new UserException("you must provide at least one recalibration report file "
                + "(arguments -BQSR, -" + BEFORE_ARG_SHORT_NAME + " or -" + AFTER_ARG_SHORT_NAME + ")");
    }
    checkOutputFile(PDF_ARG_SHORT_NAME, pdfFile);
    checkOutputFile(CSV_ARG_SHORT_NAME, csvFile);
    checkInputReportFileLMT(beforeFile, afterFile);
    checkOutputRequested();
}
/**
 * Checks whether the last-modification-time of the inputs is consistent with their relative roles.
 *
 * This routine does not throw an exception but may output a warning message if inconsistencies are spotted.
 *
 * @param beforeFile the before report file.
 * @param afterFile the after report file.
 */
private void checkInputReportFileLMT(final File beforeFile, final File afterFile) {
    if (ignoreLastModificationTime || beforeFile == null || afterFile == null) {
        return; // nothing to do here
    } else if (beforeFile.lastModified() > afterFile.lastModified()) {
        // fixed: the concatenated literals were missing a separating space ("'After'recalibration")
        Utils.warnUser("Last modification timestamp for 'Before' and 'After' "
                + "recalibration reports are in the wrong order. Perhaps, have they been swapped?");
    }
}
/**
 * Checks that at least one output was requested.
 *
 * @throws UserException if no output was requested.
 */
private void checkOutputRequested() {
    final boolean anyOutputRequested = pdfFile != null || csvFile != null;
    if (anyOutputRequested) {
        return;
    }
    throw new UserException("you need to request at least one output:"
            + " the intermediate csv file (-" + CSV_ARG_SHORT_NAME + " FILE)"
            + " or the final plot file (-" + PDF_ARG_SHORT_NAME + " FILE).");
}
/**
 * Checks the value provided to input file arguments.
 *
 * @throws UserException if there is any problem caused by or under the end user's control
 *
 * @param name command line argument short name.
 * @param value the argument value.
 */
private void checkInputReportFile(final String name,final File value) {
    if (value == null) {
        return; // optional input not provided; nothing to validate
    }
    if (!value.exists()) {
        throw new UserException.BadArgumentValue(name, "input report '" +
                value + "' does not exist or is unreachable");
    }
    if (!value.isFile()) {
        throw new UserException.BadArgumentValue(name, "input report '" +
                value + "' is not a regular file");
    }
    if (!value.canRead()) {
        throw new UserException.BadArgumentValue(name, "input report '" +
                value + "' cannot be read");
    }
}
/**
 * Checks the value provided for output arguments.
 *
 * @throws UserException if there is any problem caused by or under the end user's control
 *
 * @param name command line argument short name.
 * @param value the argument value.
 */
private void checkOutputFile(final String name, final File value) {
    if (value == null) {
        return; // optional output not requested; nothing to validate
    }
    if (value.exists() && !value.isFile()) {
        throw new UserException.BadArgumentValue(name, "the output file location '"
                + value + "' exists as not a file");
    }
    final File parent = value.getParentFile();
    if (parent == null) {
        return; // bare filename with no parent component; nothing more to check
    }
    if (!parent.exists()) {
        // fixed message grammar: "does not exists" -> "does not exist"
        throw new UserException.BadArgumentValue(name, "the output file parent directory '"
                + parent + "' does not exist or is unreachable");
    } else if (!parent.isDirectory()) {
        throw new UserException.BadArgumentValue(name, "the output file parent directory '"
                + parent + "' is not a directory");
    } else if (!parent.canWrite()) {
        throw new UserException.BadArgumentValue(name, "the output file parent directory '"
                + parent + "' cannot be written");
    }
}
/**
 * Generates the plots using the external R script.
 * <p/>
 * If <code>plotsFile</code> is <code>null</code>, it does not perform any plotting.
 *
 * @param csvFile the intermediary csv file.
 * @param reportFiles the input recalibration report files indexed by role.
 * @param plotsFile the output plot location.
 */
private void generatePlots(final File csvFile, final Map<String,File> reportFiles, final File plotsFile) {
    if (plotsFile == null) {
        return; // plotting was not requested
    }
    logger.info("Generating plots file '" + plotsFile + "'");
    // pass an arbitrary report along with the csv — presumably used for shared
    // parameters by the plotting code; confirm against RecalUtils.generatePlots
    final File representativeReport = reportFiles.values().iterator().next();
    RecalUtils.generatePlots(csvFile, representativeReport, plotsFile);
}
// NOTE(review): this walker appears to do all of its work up-front in initialize();
// the pipeline is: validate args -> load reports -> check consistency -> csv -> plots.
@Override
public void initialize() {
    super.initialize();
    // the engine-level -BQSR argument doubles as the third input report here
    bqsrFile = getToolkit().getArguments().BQSR_RECAL_FILE;
    checkArgumentsValues();
    final Map<String, File> reportFiles = buildReportFileMap();
    final Map<String, RecalibrationReport> reports = buildReportMap(reportFiles);
    checkReportConsistency(reports);
    // the csv file is the input to the plot-generation step below
    final File csvFile = resolveCsvFile();
    generateCsvFile(csvFile,reports);
    final File plotFile = resolvePlotFile();
    generatePlots(csvFile, reportFiles, plotFile);
}
/**
* Returns the plot output file
* @return might be <code>null</code> if the user has not indicated an output file.
*/
private File resolvePlotFile() {
    return pdfFile;
}
/**
 * Generates the intermediary Csv file.
 *
 * @param csvFile where to write the file.
 * @param reports the reports to be included.
 */
private void generateCsvFile(final File csvFile, final Map<String, RecalibrationReport> reports) {
    logger.info("Generating csv file '" + csvFile + "'");
    try {
        RecalUtils.generateCsv(csvFile, reports);
    } catch (final FileNotFoundException e) {
        final String message = String.format("There is a problem creating the intermediary Csv file '%s': %s",
                csvFile, e.getMessage());
        throw new UserException(message, e); // keep the original cause attached
    }
}
    /**
     * Checks whether multiple input recalibration report files argument values are consistent (equal).
     *
     * @param reports map with report to verify.
     *
     * @throws UserException.IncompatibleRecalibrationTableParameters if there is any inconsistency.
     */
    private void checkReportConsistency(final Map<String, RecalibrationReport> reports) {
        // Unchecked generic-array cast is unavoidable when materializing Map.Entry[] from an entry set.
        final Map.Entry<String,RecalibrationReport>[] reportEntries =
                reports.entrySet().toArray((Map.Entry<String,RecalibrationReport>[]) new Map.Entry[reports.size()]);
        // The first report acts as the baseline; every other report is compared against it.
        final Map.Entry<String,RecalibrationReport> exampleEntry = reportEntries[0];
        for (int i = 1; i < reportEntries.length; i++) {
            final Map<String,? extends CharSequence> diffs = exampleEntry.getValue().getRAC().compareReportArguments(
                    reportEntries[i].getValue().getRAC(),exampleEntry.getKey(),reportEntries[i].getKey());
            if (diffs.size() != 0) {
                throw new UserException.IncompatibleRecalibrationTableParameters("There are differences in relevant arguments of"
                        + " two or more input recalibration reports. Please make sure"
                        + " they have been created using the same recalibration parameters."
                        + " " + Utils.join("// ", reportDifferencesStringArray(diffs)));
            }
        }
    }
/**
* Creates a map with all input recalibration files indexed by their "role".
* <p/>
* The key is the role and the value the corresponding report file.
* <p/>
* Roles: "Before" (recalibration), "After" (recalibration), "BQSR" (the tool standard argument recalibration file)
*
* @return never <code>null</code>
*/
private Map<String, File> buildReportFileMap() {
final Map<String,File> reports = new LinkedHashMap<>(3);
if (bqsrFile != null) {
reports.put("BQSR",bqsrFile);
}
if (beforeFile != null) {
reports.put("Before",beforeFile);
}
if (afterFile != null) {
reports.put("After",afterFile);
}
return reports;
}
/**
* Transforms a recalibration file map into a report object map.
*
* @param reportFileMap the file map to transforms.
* @return never <code>null</code>, a new map with the same size as
* <code>reportFileMap</code> and the same key set.
*/
@Requires("reportFileMap != null")
private Map<String, RecalibrationReport> buildReportMap(final Map<String, File> reportFileMap) {
final Map<String,RecalibrationReport> reports = new LinkedHashMap<>(reportFileMap.size());
for (final Map.Entry<String,File> e : reportFileMap.entrySet()) {
reports.put(e.getKey(),new RecalibrationReport(e.getValue()));
}
return reports;
}
/**
* Generates a flatter String array representation of recalibration argument differences.
* @param diffs the differences to represent.
*
* @return never <code>null</code>, an array of the same length as the size of the input <code>diffs</code>.
*/
@Requires("diffs != null")
private String[] reportDifferencesStringArray(final Map<String, ? extends CharSequence> diffs) {
final String[] result = new String[diffs.size()];
int i = 0;
for (final Map.Entry<String, ? extends CharSequence> e : diffs.entrySet()) {
result[i++] = capitalize(e.getKey()) + ": " + e.getValue();
}
return result;
}
/**
* Returns the input string capitalizing the first letter.
*
* @param str the string to capitalize
* @return never <code>null</code>.
*/
@Requires("str != null")
private String capitalize(final String str) {
if (str.isEmpty()) {
return str;
} else {
return Character.toUpperCase(str.charAt(0)) + str.substring(1);
}
}
    /**
     * Returns the csv file to use.
     * <p/>
     * This is the one specified by the user if any, or a temporary file
     * that will be deleted as soon as the VM exits by default.
     *
     * @return never <code>null</code>.
     *
     * @throws UserException if the temporary csv file cannot be created.
     */
    private File resolveCsvFile() {
        if (csvFile != null) {
            return csvFile;
        } else {
            try {
                final File result = File.createTempFile("AnalyzeCovariates", ".csv");
                // Best-effort cleanup: the intermediary file is only needed while plotting runs.
                result.deleteOnExit();
                return result;
            } catch (IOException e) {
                throw new UserException("Could not create temporary Csv file",e);
            }
        }
    }
    /**
     * Always returns true, forcing the immediate termination of the traversal.
     * <p/>
     * All of this tool's work is done in {@link #initialize()}; there is nothing to traverse.
     *
     * @return always <code>true</code>.
     */
    @Override
    public boolean isDone() {
        return true;
    }
    /**
     * {@inheritDoc}
     *
     * @return a fresh {@link None} placeholder; never used since the traversal ends immediately.
     */
    @Override
    public None reduceInit() {
        return new None();
    }
    /**
     * Is not supposed to ever be called, thus it always results in an exception.
     * <p/>
     * {@link #isDone()} returns <code>true</code> before any traversal step runs.
     *
     * @param value ignored.
     * @param sum ignored.
     *
     * @throws IllegalStateException always.
     */
    @Override
    public None reduce(None value, None sum) {
        throw new IllegalStateException("AnalyzeCovariates reduce method is not supposed to be invoked ever");
    }
    /**
     * Is not supposed to ever be called, thus it always results in an exception.
     * <p/>
     * {@link #isDone()} returns <code>true</code> before any traversal step runs.
     *
     * @param tracker ignored.
     * @param ref ignored.
     * @param context ignored.
     *
     * @throws IllegalStateException always.
     */
    @Override
    public None map(RefMetaDataTracker tracker, ReferenceContext ref, AlignmentContext context) {
        throw new IllegalStateException("AnalyzeCovariates map method is not supposed to be invoked ever");
    }
    /**
     * Dummy map and reduce type for the {@link AnalyzeCovariates} tool that in fact does not do any traversal.
     * <p/>
     * The private constructor keeps instantiation confined to the enclosing walker.
     */
    protected static class None {
        private None() {
        }
    }
}

View File

@ -92,18 +92,6 @@ public class BQSRGatherer extends Gatherer {
generalReport.calculateQuantizedQualities();
RecalibrationArgumentCollection RAC = generalReport.getRAC();
if ( RAC.RECAL_PDF_FILE != null ) {
RAC.RECAL_TABLE_FILE = output;
if ( RAC.existingRecalibrationReport != null ) {
final RecalibrationReport originalReport = new RecalibrationReport(RAC.existingRecalibrationReport);
RecalUtils.generateRecalibrationPlot(RAC, originalReport.getRecalibrationTables(), generalReport.getRecalibrationTables(), generalReport.getCovariates());
}
else {
RecalUtils.generateRecalibrationPlot(RAC, generalReport.getRecalibrationTables(), generalReport.getCovariates());
}
}
generalReport.output(outputFile);
}
}

View File

@ -61,6 +61,7 @@ import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.*;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.baq.BAQ;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.collections.Pair;
@ -124,7 +125,7 @@ import java.util.List;
* -R resources/Homo_sapiens_assembly18.fasta \
* -knownSites bundle/hg18/dbsnp_132.hg18.vcf \
* -knownSites another/optional/setOfSitesToMask.vcf \
* -o recal_data.grp
* -o recal_data.table
* </pre>
*/
@ -366,9 +367,7 @@ public class BaseRecalibrator extends ReadWalker<Long, Long> implements NanoSche
}
protected static int[] calculateIsIndel( final GATKSAMRecord read, final EventType mode ) {
final byte[] readBases = read.getReadBases();
final int[] indel = new int[readBases.length];
Arrays.fill(indel, 0);
final int[] indel = new int[read.getReadBases().length];
int readPos = 0;
for ( final CigarElement ce : read.getCigar().getCigarElements() ) {
final int elementLength = ce.getLength();
@ -383,21 +382,19 @@ public class BaseRecalibrator extends ReadWalker<Long, Long> implements NanoSche
}
case D:
{
final int index = ( read.getReadNegativeStrandFlag() ? readPos : ( readPos > 0 ? readPos - 1 : readPos ) );
indel[index] = ( mode.equals(EventType.BASE_DELETION) ? 1 : 0 );
final int index = ( read.getReadNegativeStrandFlag() ? readPos : readPos - 1 );
updateIndel(indel, index, mode, EventType.BASE_DELETION);
break;
}
case I:
{
final boolean forwardStrandRead = !read.getReadNegativeStrandFlag();
if( forwardStrandRead ) {
indel[(readPos > 0 ? readPos - 1 : readPos)] = ( mode.equals(EventType.BASE_INSERTION) ? 1 : 0 );
}
for (int iii = 0; iii < elementLength; iii++) {
readPos++;
updateIndel(indel, readPos - 1, mode, EventType.BASE_INSERTION);
}
readPos += elementLength;
if( !forwardStrandRead ) {
indel[(readPos < indel.length ? readPos : readPos - 1)] = ( mode.equals(EventType.BASE_INSERTION) ? 1 : 0 );
updateIndel(indel, readPos, mode, EventType.BASE_INSERTION);
}
break;
}
@ -412,6 +409,12 @@ public class BaseRecalibrator extends ReadWalker<Long, Long> implements NanoSche
return indel;
}
private static void updateIndel(final int[] indel, final int index, final EventType mode, final EventType requiredMode) {
if ( mode == requiredMode && index >= 0 && index < indel.length )
// protect ourselves from events at the start or end of the read (1D3M or 3M1D)
indel[index] = 1;
}
protected static double[] calculateFractionalErrorArray( final int[] errorArray, final byte[] baqArray ) {
if(errorArray.length != baqArray.length ) {
throw new ReviewedStingException("Array length mismatch detected. Malformed read?");
@ -514,28 +517,13 @@ public class BaseRecalibrator extends ReadWalker<Long, Long> implements NanoSche
generateReport();
logger.info("...done!");
if ( RAC.RECAL_PDF_FILE != null ) {
logger.info("Generating recalibration plots...");
generatePlots();
}
logger.info("Processed: " + result + " reads");
logger.info("BaseRecalibrator was able to recalibrate " + result + " reads");
}
private RecalibrationTables getRecalibrationTable() {
return recalibrationEngine.getFinalRecalibrationTables();
}
private void generatePlots() {
File recalFile = getToolkit().getArguments().BQSR_RECAL_FILE;
if (recalFile != null) {
RecalibrationReport report = new RecalibrationReport(recalFile);
RecalUtils.generateRecalibrationPlot(RAC, report.getRecalibrationTables(), getRecalibrationTable(), requestedCovariates);
}
else
RecalUtils.generateRecalibrationPlot(RAC, getRecalibrationTable(), requestedCovariates);
}
/**
* go through the quality score table and use the # observations and the empirical quality score
* to build a quality score histogram for quantization. Then use the QuantizeQual algorithm to

View File

@ -46,15 +46,17 @@
package org.broadinstitute.sting.gatk.walkers.bqsr;
import com.google.java.contract.Requires;
import org.broad.tribble.Feature;
import org.broadinstitute.sting.commandline.*;
import org.broadinstitute.sting.gatk.report.GATKReportTable;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.exceptions.StingException;
import org.broadinstitute.sting.utils.recalibration.RecalUtils;
import java.io.File;
import java.io.PrintStream;
import java.util.Collections;
import java.util.List;
import java.util.*;
/**
* Created by IntelliJ IDEA.
@ -65,7 +67,7 @@ import java.util.List;
* This set of arguments will also be passed to the constructor of every Covariate when it is instantiated.
*/
public class RecalibrationArgumentCollection {
public class RecalibrationArgumentCollection implements Cloneable {
/**
* This algorithm treats every reference mismatch as an indication of error. However, real genetic variation is expected to mismatch the reference,
@ -87,21 +89,6 @@ public class RecalibrationArgumentCollection {
public File RECAL_TABLE_FILE = null;
public PrintStream RECAL_TABLE;
/**
* If not provided, then no plots will be generated (useful for queue scatter/gathering).
* However, we *highly* recommend that users generate these plots whenever possible for QC checking.
*/
@Output(fullName = "plot_pdf_file", shortName = "plots", doc = "The output recalibration pdf file to create", required = false, defaultToStdout = false)
public File RECAL_PDF_FILE = null;
/**
* If not provided, then a temporary file is created and then deleted upon completion.
* For advanced users only.
*/
@Advanced
@Argument(fullName = "intermediate_csv_file", shortName = "intermediate", doc = "The intermediate csv file to create", required = false)
public File RECAL_CSV_FILE = null;
/**
* Note that the --list argument requires a fully resolved and correct command-line to work.
*/
@ -219,6 +206,10 @@ public class RecalibrationArgumentCollection {
@Argument(fullName = "force_platform", shortName = "fP", required = false, doc = "If provided, the platform of EVERY read will be forced to be the provided String. Valid options are illumina, 454, and solid.")
public String FORCE_PLATFORM = null;
@Hidden
@Argument(fullName = "force_readgroup", shortName = "fRG", required = false, doc = "If provided, the read group of EVERY read will be forced to be the provided String.")
public String FORCE_READGROUP = null;
@Hidden
@Output(fullName = "recal_table_update_log", shortName = "recal_table_update_log", required = false, doc = "If provided, log all updates to the recalibration tables to the given file. For debugging/testing purposes only", defaultToStdout = false)
public PrintStream RECAL_TABLE_UPDATE_LOG = null;
@ -278,11 +269,147 @@ public class RecalibrationArgumentCollection {
argumentsTable.set("quantizing_levels", RecalUtils.ARGUMENT_VALUE_COLUMN_NAME, QUANTIZING_LEVELS);
argumentsTable.addRowID("recalibration_report", true);
argumentsTable.set("recalibration_report", RecalUtils.ARGUMENT_VALUE_COLUMN_NAME, existingRecalibrationReport == null ? "null" : existingRecalibrationReport.getAbsolutePath());
argumentsTable.addRowID("plot_pdf_file", true);
argumentsTable.set("plot_pdf_file", RecalUtils.ARGUMENT_VALUE_COLUMN_NAME, RECAL_PDF_FILE == null ? "null" : RECAL_PDF_FILE.getAbsolutePath());
argumentsTable.addRowID("binary_tag_name", true);
argumentsTable.set("binary_tag_name", RecalUtils.ARGUMENT_VALUE_COLUMN_NAME, BINARY_TAG_NAME == null ? "null" : BINARY_TAG_NAME);
return argumentsTable;
}
/**
* Returns a map with the arguments that differ between this an
* another {@link RecalibrationArgumentCollection} instance.
* <p/>
* The key is the name of that argument in the report file. The value is a message
* that explains the difference to the end user.
* <p/>
 * Thus, an empty map indicates that there are no differences between both argument collections that
* is relevant to report comparison.
* <p/>
* This method should not throw any exception.
*
* @param other the argument-collection to compare against.
* @param thisRole the name used to refer to this RAC report that makes sense to the end user.
* @param otherRole the name used to refer to the other RAC report that makes sense to the end user.
*
* @return never <code>null</code>, but a zero-size collection if there are no differences.
*/
@Requires("other != null && thisRole != null && otherRole != null && !thisRole.equalsIgnoreCase(otherRole)")
Map<String,? extends CharSequence> compareReportArguments(final RecalibrationArgumentCollection other,final String thisRole, final String otherRole) {
final Map<String,String> result = new LinkedHashMap<>(15);
compareRequestedCovariates(result, other, thisRole, otherRole);
compareSimpleReportArgument(result,"no_standard_covs", DO_NOT_USE_STANDARD_COVARIATES, other.DO_NOT_USE_STANDARD_COVARIATES, thisRole, otherRole);
compareSimpleReportArgument(result,"run_without_dbsnp",RUN_WITHOUT_DBSNP,other.RUN_WITHOUT_DBSNP,thisRole,otherRole);
compareSimpleReportArgument(result,"solid_recal_mode", SOLID_RECAL_MODE, other.SOLID_RECAL_MODE,thisRole,otherRole);
compareSimpleReportArgument(result,"solid_nocall_strategy", SOLID_NOCALL_STRATEGY, other.SOLID_NOCALL_STRATEGY,thisRole,otherRole);
compareSimpleReportArgument(result,"mismatches_context_size", MISMATCHES_CONTEXT_SIZE,other.MISMATCHES_CONTEXT_SIZE,thisRole,otherRole);
compareSimpleReportArgument(result,"mismatches_default_quality", MISMATCHES_DEFAULT_QUALITY, other.MISMATCHES_DEFAULT_QUALITY,thisRole,otherRole);
compareSimpleReportArgument(result,"deletions_default_quality", DELETIONS_DEFAULT_QUALITY, other.DELETIONS_DEFAULT_QUALITY,thisRole,otherRole);
compareSimpleReportArgument(result,"insertions_default_quality", INSERTIONS_DEFAULT_QUALITY, other.INSERTIONS_DEFAULT_QUALITY,thisRole,otherRole);
compareSimpleReportArgument(result,"maximum_cycle_value", MAXIMUM_CYCLE_VALUE, other.MAXIMUM_CYCLE_VALUE,thisRole,otherRole);
compareSimpleReportArgument(result,"low_quality_tail", LOW_QUAL_TAIL, other.LOW_QUAL_TAIL,thisRole,otherRole);
compareSimpleReportArgument(result,"default_platform", DEFAULT_PLATFORM, other.DEFAULT_PLATFORM,thisRole,otherRole);
compareSimpleReportArgument(result,"force_platform", FORCE_PLATFORM, other.FORCE_PLATFORM,thisRole,otherRole);
compareSimpleReportArgument(result,"quantizing_levels", QUANTIZING_LEVELS, other.QUANTIZING_LEVELS,thisRole,otherRole);
compareSimpleReportArgument(result,"binary_tag_name", BINARY_TAG_NAME, other.BINARY_TAG_NAME,thisRole,otherRole);
return result;
}
/**
* Compares the covariate report lists.
*
* @param diffs map where to annotate the difference.
* @param other the argument collection to compare against.
* @param thisRole the name for this argument collection that makes sense to the user.
* @param otherRole the name for the other argument collection that makes sense to the end user.
*
* @return <code>true</code> if a difference was found.
*/
@Requires("diffs != null && other != null && thisRole != null && otherRole != null")
private boolean compareRequestedCovariates(final Map<String,String> diffs,
final RecalibrationArgumentCollection other, final String thisRole, final String otherRole) {
final Set<String> beforeNames = new HashSet<>(this.COVARIATES.length);
final Set<String> afterNames = new HashSet<>(other.COVARIATES.length);
Utils.addAll(beforeNames, this.COVARIATES);
Utils.addAll(afterNames,other.COVARIATES);
final Set<String> intersect = new HashSet<>(Math.min(beforeNames.size(),afterNames.size()));
intersect.addAll(beforeNames);
intersect.retainAll(afterNames);
String diffMessage = null;
if (intersect.size() == 0) { // In practice this is not possible due to required covariates but...
diffMessage = String.format("There are no common covariates between '%s' and '%s'"
+ " recalibrator reports. Covariates in '%s': {%s}. Covariates in '%s': {%s}.",thisRole,otherRole,
thisRole,Utils.join(", ",this.COVARIATES),
otherRole,Utils.join(",",other.COVARIATES));
} else if (intersect.size() != beforeNames.size() || intersect.size() != afterNames.size()) {
beforeNames.removeAll(intersect);
afterNames.removeAll(intersect);
diffMessage = String.format("There are differences in the set of covariates requested in the"
+ " '%s' and '%s' recalibrator reports. "
+ " Exclusive to '%s': {%s}. Exclusive to '%s': {%s}.",thisRole,otherRole,
thisRole,Utils.join(", ",beforeNames),
otherRole,Utils.join(", ",afterNames));
}
if (diffMessage != null) {
diffs.put("covariate",diffMessage);
return true;
} else {
return false;
}
}
/**
* Annotates a map with any difference encountered in a simple value report argument that differs between this an
* another {@link RecalibrationArgumentCollection} instance.
* <p/>
* The key of the new entry would be the name of that argument in the report file. The value is a message
* that explains the difference to the end user.
* <p/>
*
* <p/>
 * This method should not throw any exception.
*
* @param diffs where to annotate the differences.
* @param name the name of the report argument to compare.
* @param thisValue this argument collection value for that argument.
* @param otherValue the other collection value for that argument.
* @param thisRole the name used to refer to this RAC report that makes sense to the end user.
* @param otherRole the name used to refer to the other RAC report that makes sense to the end user.
*
* @type T the argument Object value type.
*
* @return <code>true</code> if a difference has been spotted, thus <code>diff</code> has been modified.
*/
private <T> boolean compareSimpleReportArgument(final Map<String,String> diffs,
final String name, final T thisValue, final T otherValue, final String thisRole, final String otherRole) {
if (thisValue == null && otherValue == null) {
return false;
} else if (thisValue != null && thisValue.equals(otherValue)) {
return false;
} else {
diffs.put(name,
String.format("differences between '%s' {%s} and '%s' {%s}.",
thisRole,thisValue == null ? "" : thisValue,
otherRole,otherValue == null ? "" : otherValue));
return true;
}
}
/**
* Create a shallow copy of this argument collection.
*
* @return never <code>null</code>.
*/
@Override
public RecalibrationArgumentCollection clone() {
try {
return (RecalibrationArgumentCollection) super.clone();
} catch (CloneNotSupportedException e) {
throw new StingException("Unreachable code clone not supported thrown when the class "
+ this.getClass().getName() + " is cloneable ",e);
}
}
}

View File

@ -207,7 +207,7 @@ public class HeaderElement {
public void removeInsertionToTheRight() {
this.insertionsToTheRight--;
if (insertionsToTheRight < 0)
throw new ReviewedStingException("Removed too many insertions, header is now negative!");
throw new ReviewedStingException("Removed too many insertions, header is now negative at position " + location);
}
public boolean hasInsertionToTheRight() {

View File

@ -64,6 +64,7 @@ import org.broadinstitute.sting.gatk.io.StingSAMFileWriter;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.*;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.SampleUtils;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
@ -236,6 +237,15 @@ public class ReduceReads extends ReadWalker<ObjectArrayList<GATKSAMRecord>, Redu
@Argument(fullName = "downsample_coverage", shortName = "ds", doc = "", required = false)
public int downsampleCoverage = 250;
/**
* Generally, this tool is not meant to be run for more than 1 sample at a time. The one valid exception
* brought to our attention by colleagues is the specific case of tumor/normal pairs in cancer analysis.
* To prevent users from unintentionally running the tool in a less than ideal manner, we require them
* to explicitly enable multi-sample analysis with this argument.
*/
@Argument(fullName = "cancer_mode", shortName = "cancer_mode", doc = "enable multi-samples reduction for cancer analysis", required = false)
public boolean ALLOW_MULTIPLE_SAMPLES = false;
@Hidden
@Argument(fullName = "nwayout", shortName = "nw", doc = "", required = false)
public boolean nwayout = false;
@ -263,8 +273,9 @@ public class ReduceReads extends ReadWalker<ObjectArrayList<GATKSAMRecord>, Redu
int nCompressedReads = 0;
Object2LongOpenHashMap<String> readNameHash; // This hash will keep the name of the original read the new compressed name (a number).
private static int READ_NAME_HASH_DEFAULT_SIZE = 1000;
Long nextReadNumber = 1L; // The next number to use for the compressed read name.
Object2LongOpenHashMap<String> readNameHash; // This hash will keep the name of the original read the new compressed name (a number).
ObjectSortedSet<GenomeLoc> intervalList;
@ -294,13 +305,16 @@ public class ReduceReads extends ReadWalker<ObjectArrayList<GATKSAMRecord>, Redu
if ( minAltProportionToTriggerVariant < 0.0 || minAltProportionToTriggerVariant > 1.0 )
throw new UserException.BadArgumentValue("--minimum_alt_proportion_to_trigger_variant", "must be a value between 0 and 1 (inclusive)");
if ( SampleUtils.getSAMFileSamples(getToolkit().getSAMFileHeader()).size() > 1 && !ALLOW_MULTIPLE_SAMPLES )
throw new UserException.BadInput("Reduce Reads is not meant to be run for more than 1 sample at a time except for the specific case of tumor/normal pairs in cancer analysis");
if ( known.isEmpty() )
knownSnpPositions = null;
else
knownSnpPositions = new ObjectAVLTreeSet<GenomeLoc>();
GenomeAnalysisEngine toolkit = getToolkit();
readNameHash = new Object2LongOpenHashMap<String>(100000); // prepare the read name hash to keep track of what reads have had their read names compressed
this.resetReadNameHash(); // prepare the read name hash to keep track of what reads have had their read names compressed
intervalList = new ObjectAVLTreeSet<GenomeLoc>(); // get the interval list from the engine. If no interval list was provided, the walker will work in WGS mode
if (toolkit.getIntervals() != null)
@ -322,6 +336,16 @@ public class ReduceReads extends ReadWalker<ObjectArrayList<GATKSAMRecord>, Redu
}
}
/** Initializer for {@link #readNameHash}. */
private void resetReadNameHash() {
// If the hash grows large, subsequent clear operations can be very expensive, so trim the hash down if it grows beyond its default.
if (readNameHash == null || readNameHash.size() > READ_NAME_HASH_DEFAULT_SIZE) {
readNameHash = new Object2LongOpenHashMap<String>(READ_NAME_HASH_DEFAULT_SIZE);
} else {
readNameHash.clear();
}
}
/**
* Takes in a read and prepares it for the SlidingWindow machinery by performing the
* following optional clipping operations:
@ -458,7 +482,7 @@ public class ReduceReads extends ReadWalker<ObjectArrayList<GATKSAMRecord>, Redu
// stash.compress(), the readNameHash can be cleared after the for() loop above.
// The advantage of clearing the hash is that otherwise it holds all reads that have been encountered,
// which can use a lot of memory and cause RR to slow to a crawl and/or run out of memory.
readNameHash.clear();
this.resetReadNameHash();
}
} else

View File

@ -877,6 +877,10 @@ public class SlidingWindow {
final int start = region.getStart() - windowHeaderStart;
int stop = region.getStop() - windowHeaderStart;
// make sure the bitset is complete given the region (it might not be in multi-sample mode)
if ( region.getStop() > markedSites.getStartLocation() + markedSites.getVariantSiteBitSet().length )
markSites(region.getStop());
CloseVariantRegionResult closeVariantRegionResult = closeVariantRegion(start, stop, knownSnpPositions);
allReads.addAll(closeVariantRegionResult.reads);
@ -1195,7 +1199,7 @@ public class SlidingWindow {
}
// Special case for leading insertions before the beginning of the sliding read
if ( ReadUtils.readStartsWithInsertion(read).getFirst() && (readStart == headerStart || headerStart < 0) ) {
if ( (readStart == headerStart || headerStart < 0) && ReadUtils.readStartsWithInsertion(read.getCigar(), false) != null ) {
// create a new first element to the window header with no bases added
header.addFirst(new HeaderElement(readStart - 1));
// this allows the first element (I) to look at locationIndex - 1 when we update the header and do the right thing

View File

@ -63,6 +63,10 @@ abstract class AbstractStratification {
private Map<CallableStatus, Integer> statusTally = null;
protected ThresHolder thresholds;
public AbstractStratification(ThresHolder thresholds) {
this.thresholds = thresholds;
}
/**
* Calculates the average "good" coverage of this sample. Good means "passes the base and
* mapping quality requirements.
@ -116,11 +120,11 @@ abstract class AbstractStratification {
*
* @return the callable status(es) for the whole object
*/
public abstract Iterable<CallableStatus> callableStatuses();
public abstract List<CallableStatus> callableStatuses();
/**
* Tally up all the callable status of all the loci in this sample.
* Tally up all the callable status of all elements of the stratification.
*
* @return a map of callable status and counts
*/
@ -136,10 +140,10 @@ abstract class AbstractStratification {
return statusTally;
}
public static List<CallableStatus> queryStatus(List<Metric> statList, AbstractStratification stratification) {
public List<CallableStatus> queryStatus(List<Metric> statList) {
List<CallableStatus> output = new LinkedList<CallableStatus>();
for (Metric stat : statList) {
final CallableStatus status = stat.status(stratification);
final CallableStatus status = stat.status(this);
if (status != null) {
output.add(status);
}

View File

@ -52,6 +52,7 @@ import org.broadinstitute.sting.commandline.Output;
import org.broadinstitute.sting.gatk.CommandLineGATK;
import org.broadinstitute.sting.gatk.contexts.AlignmentContext;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.downsampling.DownsampleType;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.*;
import org.broadinstitute.sting.utils.GenomeLoc;
@ -65,6 +66,7 @@ import org.broadinstitute.variant.variantcontext.*;
import org.broadinstitute.variant.variantcontext.writer.VariantContextWriter;
import org.broadinstitute.variant.vcf.*;
import java.io.PrintStream;
import java.util.*;
/**
@ -109,9 +111,13 @@ import java.util.*;
@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_QC, extraDocs = {CommandLineGATK.class} )
@By(value = DataSource.READS)
@PartitionBy(PartitionType.INTERVAL)
@Downsample(by = DownsampleType.NONE)
public class DiagnoseTargets extends LocusWalker<Long, Long> {
private static final String AVG_INTERVAL_DP_KEY = "IDP";
private static final String LOW_COVERAGE_LOCI = "LL";
private static final String ZERO_COVERAGE_LOCI = "ZL";
@Output(doc = "File to which interval statistics should be written")
private VariantContextWriter vcfWriter = null;
@ -119,13 +125,12 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
@ArgumentCollection
private ThresHolder thresholds = new ThresHolder();
private Map<GenomeLoc, IntervalStratification> intervalMap = null; // maps each interval => statistics
private Map<GenomeLoc, IntervalStratification> intervalMap = null; // maps each interval => statistics
private PeekableIterator<GenomeLoc> intervalListIterator; // an iterator to go over all the intervals provided as we traverse the genome
private Set<String> samples = null; // all the samples being processed
private static final Allele SYMBOLIC_ALLELE = Allele.create("<DT>", false); // avoid creating the symbolic allele multiple times
private static final Allele UNCOVERED_ALLELE = Allele.create("A", true); // avoid creating the 'fake' ref allele for uncovered intervals multiple times
private static final int INITIAL_HASH_SIZE = 500000;
private static final int INITIAL_HASH_SIZE = 50; // enough room for potential overlapping intervals plus recently finished intervals
@Override
public void initialize() {
@ -134,7 +139,7 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
if (getToolkit().getIntervals() == null || getToolkit().getIntervals().isEmpty())
throw new UserException("This tool only works if you provide one or more intervals (use the -L argument). If you want to run whole genome, use -T DepthOfCoverage instead.");
intervalMap = new HashMap<GenomeLoc, IntervalStratification>(INITIAL_HASH_SIZE);
intervalMap = new LinkedHashMap<GenomeLoc, IntervalStratification>(INITIAL_HASH_SIZE);
intervalListIterator = new PeekableIterator<GenomeLoc>(getToolkit().getIntervals().iterator());
// get all of the unique sample names for the VCF Header
@ -146,13 +151,13 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
}
@Override
public Long map(RefMetaDataTracker tracker, ReferenceContext ref, AlignmentContext context) {
public Long map(final RefMetaDataTracker tracker, final ReferenceContext ref, final AlignmentContext context) {
GenomeLoc refLocus = ref.getLocus();
// process and remove any intervals in the map that are don't overlap the current locus anymore
// and add all new intervals that may overlap this reference locus
outputFinishedIntervals(refLocus, ref.getBase());
addNewOverlappingIntervals(refLocus);
outputFinishedIntervals(refLocus, ref.getBase());
// at this point, all intervals in intervalMap overlap with this locus, so update all of them
for (IntervalStratification intervalStratification : intervalMap.values())
@ -184,7 +189,7 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
* @param result number of loci processed by the walker
*/
@Override
public void onTraversalDone(Long result) {
public void onTraversalDone(final Long result) {
for (GenomeLoc interval : intervalMap.keySet())
outputStatsToVCF(intervalMap.get(interval), UNCOVERED_ALLELE);
@ -194,6 +199,10 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
intervalListIterator.next();
interval = intervalListIterator.peek();
}
if (thresholds.missingTargets != null) {
thresholds.missingTargets.close();
}
}
/**
@ -203,24 +212,21 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
* @param refBase the reference allele
*/
private void outputFinishedIntervals(final GenomeLoc refLocus, final byte refBase) {
GenomeLoc interval = intervalListIterator.peek();
// output empty statistics for uncovered intervals
while (interval != null && interval.isBefore(refLocus)) {
final IntervalStratification stats = intervalMap.get(interval);
outputStatsToVCF(stats != null ? stats : createIntervalStatistic(interval), UNCOVERED_ALLELE);
if (stats != null) intervalMap.remove(interval);
intervalListIterator.next();
interval = intervalListIterator.peek();
}
// remove any potential leftover interval in intervalMap (this will only happen when we have overlapping intervals)
// output any intervals that were finished
final List<GenomeLoc> toRemove = new LinkedList<GenomeLoc>();
for (GenomeLoc key : intervalMap.keySet()) {
if (key.isBefore(refLocus)) {
outputStatsToVCF(intervalMap.get(key), Allele.create(refBase, true));
intervalMap.remove(key);
final IntervalStratification intervalStats = intervalMap.get(key);
outputStatsToVCF(intervalStats, Allele.create(refBase, true));
if (hasMissingLoci(intervalStats)) {
outputMissingInterval(intervalStats);
}
toRemove.add(key);
}
}
for (GenomeLoc key : toRemove) {
intervalMap.remove(key);
}
}
/**
@ -228,7 +234,7 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
*
* @param refLocus the current reference locus
*/
private void addNewOverlappingIntervals(GenomeLoc refLocus) {
private void addNewOverlappingIntervals(final GenomeLoc refLocus) {
GenomeLoc interval = intervalListIterator.peek();
while (interval != null && !interval.isPast(refLocus)) {
intervalMap.put(interval, createIntervalStatistic(interval));
@ -243,14 +249,24 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
* @param stats The statistics of the interval
* @param refAllele the reference allele
*/
private void outputStatsToVCF(IntervalStratification stats, Allele refAllele) {
private void outputStatsToVCF(final IntervalStratification stats, final Allele refAllele) {
GenomeLoc interval = stats.getInterval();
final List<Allele> alleles = new ArrayList<Allele>();
final Map<String, Object> attributes = new HashMap<String, Object>();
final ArrayList<Genotype> genotypes = new ArrayList<Genotype>();
List<Allele> alleles = new ArrayList<Allele>();
Map<String, Object> attributes = new HashMap<String, Object>();
ArrayList<Genotype> genotypes = new ArrayList<Genotype>();
for (String sample : samples) {
final GenotypeBuilder gb = new GenotypeBuilder(sample);
SampleStratification sampleStat = stats.getSampleStatistics(sample);
gb.attribute(AVG_INTERVAL_DP_KEY, sampleStat.averageCoverage(interval.size()));
gb.attribute(LOW_COVERAGE_LOCI, sampleStat.getNLowCoveredLoci());
gb.attribute(ZERO_COVERAGE_LOCI, sampleStat.getNUncoveredLoci());
gb.filters(statusToStrings(stats.getSampleStatistics(sample).callableStatuses(), false));
genotypes.add(gb.make());
}
alleles.add(refAllele);
alleles.add(SYMBOLIC_ALLELE);
VariantContextBuilder vcb = new VariantContextBuilder("DiagnoseTargets", interval.getContig(), interval.getStart(), interval.getStop(), alleles);
@ -262,21 +278,56 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
attributes.put(AVG_INTERVAL_DP_KEY, stats.averageCoverage(interval.size()));
vcb = vcb.attributes(attributes);
for (String sample : samples) {
final GenotypeBuilder gb = new GenotypeBuilder(sample);
SampleStratification sampleStat = stats.getSampleStatistics(sample);
gb.attribute(AVG_INTERVAL_DP_KEY, sampleStat.averageCoverage(interval.size()));
gb.filters(statusToStrings(stats.getSampleStatistics(sample).callableStatuses(), false));
genotypes.add(gb.make());
}
vcb = vcb.genotypes(genotypes);
vcfWriter.add(vcb.make());
}
/**
 * Returns true when the given stratification reports at least one callable status
 * (i.e. at least one QC/filter flag was raised for it).
 *
 * NOTE(review): despite the name, this does not inspect loci directly — it only
 * checks that the status list is non-empty.
 *
 * @param stats the stratification whose statuses are inspected
 * @return true if the stratification has one or more callable statuses
 */
private boolean hasMissingStatuses(AbstractStratification stats) {
return !stats.callableStatuses().isEmpty();
}
/**
 * Decides whether this interval should be reported to the missing-targets output.
 *
 * Only meaningful when the user requested the missing-targets file
 * (thresholds.missingTargets != null); otherwise always false.
 *
 * @param stats the interval stratification to test
 * @return true if missing-targets output is enabled and the interval has raised statuses
 */
private boolean hasMissingLoci(final IntervalStratification stats) {
return thresholds.missingTargets != null && hasMissingStatuses(stats);
}
private void outputMissingInterval(final IntervalStratification stats) {
final GenomeLoc interval = stats.getInterval();
final boolean missing[] = new boolean[interval.size()];
Arrays.fill(missing, true);
for (AbstractStratification sample : stats.getElements()) {
if (hasMissingStatuses(sample)) {
int pos = 0;
for (AbstractStratification locus : sample.getElements()) {
if (locus.callableStatuses().isEmpty()) {
missing[pos] = false;
}
pos++;
}
}
}
int start = -1;
boolean insideMissing = false;
for (int i = 0; i < missing.length; i++) {
if (missing[i] && !insideMissing) {
start = interval.getStart() + i;
insideMissing = true;
} else if (!missing[i] && insideMissing) {
final int stop = interval.getStart() + i - 1;
outputMissingInterval(interval.getContig(), start, stop);
insideMissing = false;
}
}
if (insideMissing) {
outputMissingInterval(interval.getContig(), start, interval.getStop());
}
}
/**
 * Appends one missing interval, formatted as contig:start-stop, to the
 * user-requested missing-targets output file.
 *
 * @param contig the contig name
 * @param start  the first missing position (inclusive)
 * @param stop   the last missing position (inclusive)
 */
private void outputMissingInterval(final String contig, final int start, final int stop) {
thresholds.missingTargets.printf("%s:%d-%d%n", contig, start, stop);
}
/**
* Function that process a set of statuses into strings
*
@ -345,6 +396,8 @@ public class DiagnoseTargets extends LocusWalker<Long, Long> {
// FORMAT fields for each genotype
headerLines.add(VCFStandardHeaderLines.getFormatLine(VCFConstants.GENOTYPE_FILTER_KEY));
headerLines.add(new VCFFormatHeaderLine(AVG_INTERVAL_DP_KEY, 1, VCFHeaderLineType.Float, "Average sample depth across the interval. Sum of the sample specific depth in all loci divided by interval size."));
headerLines.add(new VCFFormatHeaderLine(LOW_COVERAGE_LOCI, 1, VCFHeaderLineType.Integer, "Number of loci for this sample, in this interval with low coverage (below the minimum coverage) but not zero."));
headerLines.add(new VCFFormatHeaderLine(ZERO_COVERAGE_LOCI, 1, VCFHeaderLineType.Integer, "Number of loci for this sample, in this interval with zero coverage."));
// FILTER fields
for (CallableStatus stat : CallableStatus.values())

View File

@ -56,11 +56,11 @@ import java.util.*;
final class IntervalStratification extends AbstractStratification {
private final Map<String, AbstractStratification> samples;
private final GenomeLoc interval;
private final ThresHolder thresholds;
private List<CallableStatus> callableStatuses;
public IntervalStratification(Set<String> samples, GenomeLoc interval, ThresHolder thresholds) {
super(thresholds);
this.interval = interval;
this.thresholds = thresholds;
this.samples = new HashMap<String, AbstractStratification>(samples.size());
for (String sample : samples)
this.samples.put(sample, new SampleStratification(interval, thresholds));
@ -114,7 +114,13 @@ final class IntervalStratification extends AbstractStratification {
* {@inheritDoc}
*/
@Override
public Iterable<CallableStatus> callableStatuses() {
public List<CallableStatus> callableStatuses() {
if (callableStatuses == null)
callableStatuses = calculateStatus();
return callableStatuses;
}
private List<CallableStatus> calculateStatus() {
final List<CallableStatus> output = new LinkedList<CallableStatus>();
// check if any of the votes pass the threshold
@ -125,7 +131,7 @@ final class IntervalStratification extends AbstractStratification {
}
}
output.addAll(queryStatus(thresholds.intervalMetricList, this));
output.addAll(queryStatus(thresholds.intervalMetricList));
return output;
}

View File

@ -46,22 +46,20 @@
package org.broadinstitute.sting.gatk.walkers.diagnostics.diagnosetargets;
import java.util.LinkedList;
import java.util.List;
final class LocusStratification extends AbstractStratification {
private long coverage;
private long rawCoverage;
private final List<Metric> locusStatisticsList;
public LocusStratification(ThresHolder thresholds) {
this(0,0,thresholds);
}
protected LocusStratification(int coverage, int rawCoverage, ThresHolder thresholds) {
super(thresholds);
this.coverage = coverage;
this.rawCoverage = rawCoverage;
this.locusStatisticsList = thresholds.locusMetricList;
}
@Override
@ -79,14 +77,7 @@ final class LocusStratification extends AbstractStratification {
* @return a set of all statuses that apply
*/
public List<CallableStatus> callableStatuses() {
List<CallableStatus> output = new LinkedList<CallableStatus>();
for (Metric stats : locusStatisticsList) {
CallableStatus status = stats.status(this);
if (status != null) {
output.add(status);
}
}
return output;
return queryStatus(thresholds.locusMetricList);
}
@Override

View File

@ -58,6 +58,6 @@ final class PluginUtils {
final Map<CallableStatus, Integer> totals = sampleStratification.getStatusTally();
final int size = sampleStratification.getIntervalSize();
final int statusCount = totals.containsKey(CALL) ? totals.get(CALL) : 0;
return ( (double) statusCount / size) >= threshold ? CALL: null;
return ( (double) statusCount / size) > threshold ? CALL: null;
}
}

View File

@ -61,15 +61,14 @@ import java.util.List;
final class SampleStratification extends AbstractStratification {
private final GenomeLoc interval;
private final ArrayList<AbstractStratification> loci;
private final ThresHolder thresholds;
private int nReads = -1;
private int nBadMates = -1;
public SampleStratification(final GenomeLoc interval, final ThresHolder thresholds) {
super(thresholds);
this.interval = interval;
this.loci = new ArrayList<AbstractStratification>(interval.size());
this.thresholds = thresholds;
nReads = 0;
nBadMates = 0;
@ -118,10 +117,10 @@ final class SampleStratification extends AbstractStratification {
* {@inheritDoc}
*/
@Override
public Iterable<CallableStatus> callableStatuses() {
public List<CallableStatus> callableStatuses() {
final List<CallableStatus> output = new LinkedList<CallableStatus>();
// get the tally of all the locus callable statuses
// get the sample statuses of all the Loci Metrics
for (Metric locusStat : thresholds.locusMetricList) {
final CallableStatus status = ((LocusMetric) locusStat).sampleStatus(this);
if (status != null) {
@ -130,12 +129,7 @@ final class SampleStratification extends AbstractStratification {
}
// get the sample specific statistics statuses
for (Metric sampleStat : thresholds.sampleMetricList) {
final CallableStatus status = sampleStat.status(this);
if (status != null) {
output.add(status);
}
}
output.addAll(queryStatus(thresholds.sampleMetricList));
// special case, if there are no reads, then there is no sense reporting coverage gaps.
if (output.contains(CallableStatus.NO_READS) && output.contains(CallableStatus.COVERAGE_GAPS))
@ -159,4 +153,17 @@ final class SampleStratification extends AbstractStratification {
read.setTemporaryAttribute("seen", true);
}
}
/**
 * Returns the number of loci in this sample flagged LOW_COVERAGE
 * (covered, but below the minimum coverage threshold).
 *
 * @return the LOW_COVERAGE tally, or 0 if none
 */
public int getNLowCoveredLoci() {
return getCallableStatusCount(CallableStatus.LOW_COVERAGE);
}
/**
 * Returns the number of loci in this sample flagged COVERAGE_GAPS
 * (zero coverage).
 *
 * @return the COVERAGE_GAPS tally, or 0 if none
 */
public int getNUncoveredLoci() {
return getCallableStatusCount(CallableStatus.COVERAGE_GAPS);
}
/**
 * Looks up the tally for the given status; statuses absent from the tally
 * map count as zero.
 *
 * @param status the callable status to count
 * @return the number of loci with that status, or 0 if none recorded
 */
private int getCallableStatusCount(CallableStatus status) {
final Integer tally = getStatusTally().get(status);
if (tally == null) {
return 0;
}
return tally;
}
}

View File

@ -47,7 +47,9 @@
package org.broadinstitute.sting.gatk.walkers.diagnostics.diagnosetargets;
import org.broadinstitute.sting.commandline.Argument;
import org.broadinstitute.sting.commandline.Output;
import java.io.PrintStream;
import java.util.LinkedList;
import java.util.List;
@ -114,6 +116,9 @@ final class ThresHolder {
@Argument(fullName = "quality_status_threshold", shortName = "stQ", doc = "The proportion of the loci needed for calling POOR_QUALITY", required = false)
public double qualityStatusThreshold = 0.50;
@Output(fullName = "missing_intervals", shortName = "missing", defaultToStdout = false, doc ="Produces a file with the intervals that don't pass filters", required = false)
public PrintStream missingTargets = null;
public final List<Metric> locusMetricList = new LinkedList<Metric>();
public final List<Metric> sampleMetricList = new LinkedList<Metric>();
public final List<Metric> intervalMetricList = new LinkedList<Metric>();

View File

@ -0,0 +1,110 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.diagnostics.missing;
/**
 * Accumulator for per-interval quality metrics used by QualifyMissingIntervals.
 * <p>
 * Holds running sums of GC content, base quality and mapping quality over the
 * reads and reference bases of an interval, and exposes the corresponding
 * averages. Instances produced per locus are merged with {@code combine}.
 * </p>
 *
 * @author Mauricio Carneiro
 * @since 5/1/13
 */
final class Metrics {
    // Running sums; the accessors below convert them to averages.
    private double gccontent;   // sum of GC indicator values (1.0 per G/C reference base)
    private double baseQual;    // sum of base qualities over all read observations
    private double mapQual;     // sum of mapping qualities over all read observations
    private int reads;          // number of read observations accumulated
    private int refs;           // number of reference bases accumulated

    /** Sets the number of read observations. */
    void reads(int reads) {this.reads = reads;}
    /** Sets the number of reference bases. */
    void refs(int refs) {this.refs = refs;}
    /** Sets the GC-content sum. */
    void gccontent(double gccontent) {this.gccontent = gccontent;}
    /** Sets the base-quality sum. */
    void baseQual(double baseQual) {this.baseQual = baseQual;}
    /** Sets the mapping-quality sum. */
    void mapQual(double mapQual) {this.mapQual = mapQual;}

    /** @return average GC content per reference base, or 0.0 if no bases were seen */
    double gccontent() {return refs > 0 ? gccontent/refs : 0.0;}
    /** @return average base quality per read observation, or 0.0 if no reads were seen */
    double baseQual() {return reads > 0 ? baseQual/reads : 0.0;}
    /** @return average mapping quality per read observation, or 0.0 if no reads were seen */
    double mapQual() {return reads > 0 ? mapQual/reads : 0.0;}

    /**
     * Combines two metrics by accumulating the other metric's sums into this one.
     *
     * @param value the other metric to combine; null is treated as an empty metric
     *              (the walker's map() returns null when no tracker is available,
     *              and reduce() passes that value straight in)
     * @return itself, for simple reduce
     */
    public Metrics combine(Metrics value) {
        if (value == null) {
            return this;
        }
        this.gccontent += value.gccontent;
        this.baseQual += value.baseQual;
        this.mapQual += value.mapQual;
        this.reads += value.reads;
        this.refs += value.refs;
        return this;
    }
}

View File

@ -0,0 +1,226 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.diagnostics.missing;
import org.broadinstitute.sting.commandline.Argument;
import org.broadinstitute.sting.commandline.Output;
import org.broadinstitute.sting.gatk.CommandLineGATK;
import org.broadinstitute.sting.gatk.contexts.AlignmentContext;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.report.GATKReport;
import org.broadinstitute.sting.gatk.walkers.By;
import org.broadinstitute.sting.gatk.walkers.DataSource;
import org.broadinstitute.sting.gatk.walkers.LocusWalker;
import org.broadinstitute.sting.gatk.walkers.NanoSchedulable;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.GenomeLocSortedSet;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.help.DocumentedGATKFeature;
import org.broadinstitute.sting.utils.help.HelpConstants;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.text.XReadLines;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.List;
/**
* Walks along reference and calculates a few metrics for each interval.
*
* Metrics:
* <ul>
* <li>Average Base Quality</li>
* <li>Average Mapping Quality</li>
* <li>GC Content</li>
* <li>Position in the target</li>
* <li>Coding Sequence / Intron</li>
* <li>Length of the uncovered area</li>
* </ul>
*
* <h3>Input</h3>
* <p>
* A reference file
* </p>
*
* <h3>Output</h3>
* <p>
* GC content calculations per interval.
* </p>
*
* <h3>Example</h3>
* <pre>
* java -Xmx2g -jar GenomeAnalysisTK.jar \
* -T QualifyMissingIntervals \
* -R ref.fasta \
* -o output.grp \
* -L input.intervals \
* -cds cds.intervals \
* -targets targets.intervals
* </pre>
*
*/
@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_QC, extraDocs = {CommandLineGATK.class} )
@By(DataSource.REFERENCE)
public final class QualifyMissingIntervals extends LocusWalker<Metrics, Metrics> implements NanoSchedulable {
    @Output
    protected PrintStream out;

    @Argument(shortName = "targets", required = true)
    public File targetsFile;

    /** Optional coding-sequence intervals; when omitted the CD column reports no overlap. */
    @Argument(shortName = "cds", required = false)
    public File cdsFile;

    GATKReport simpleReport;        // one row per traversed interval, printed in onTraversalDone()
    GenomeLocSortedSet target;      // intervals from -targets, used for the TP (position-in-target) column
    GenomeLocSortedSet cds;         // intervals from -cds, used for the CD (coding/intron) column

    public boolean isReduceByInterval() {
        return true;                // we want one reduce result per -L interval
    }

    /**
     * Sets up the report table and loads the target (and, if provided, CDS) interval files.
     */
    public void initialize() {
        simpleReport = GATKReport.newSimpleReport("QualifyMissingIntervals", "IN", "GC", "BQ", "MQ", "TP", "CD", "LN");
        final GenomeLocParser parser = getToolkit().getGenomeLocParser();
        target = new GenomeLocSortedSet(parser);
        cds = new GenomeLocSortedSet(parser);
        parseFile(targetsFile, target, parser);
        // -cds is optional (required = false); without this guard a missing argument
        // would crash when parseFile tries to open a null File.
        if (cdsFile != null)
            parseFile(cdsFile, cds, parser);
    }

    public Metrics reduceInit() {
        return new Metrics();
    }

    /**
     * Accumulates per-locus quality metrics: summed base and mapping qualities over the
     * pileup, a 0/1 GC indicator for the reference base, the pileup depth and a reference
     * base counter (presumably Metrics normalizes sums by these counts — confirm in Metrics).
     *
     * @return a single-locus Metrics, or null when there is no tracker at this locus
     */
    public Metrics map(RefMetaDataTracker tracker, ReferenceContext ref, AlignmentContext context) {
        if (tracker == null)
            return null;

        final Metrics metrics = new Metrics();
        final byte baseIndex = ref.getBase();
        final ReadBackedPileup pileup = context.getBasePileup();
        final int nBases = pileup.getNumberOfElements();

        double baseQual = 0.0;
        for (byte qual : pileup.getQuals()) {
            baseQual += qual;
        }
        double mapQual = 0.0;
        for (byte qual : pileup.getMappingQuals()) {
            mapQual += qual;
        }

        metrics.baseQual(baseQual);
        metrics.mapQual(mapQual);
        metrics.gccontent(baseIndex == 'C' || baseIndex == 'G' ? 1.0 : 0.0);
        metrics.reads(nBases);
        metrics.refs(1);
        return metrics;
    }

    @Override
    public Metrics reduce(Metrics value, Metrics sum) {
        return sum.combine(value);
    }

    /**
     * Emits one report row per traversed interval and prints the table to the output stream.
     */
    public void onTraversalDone(List<Pair<GenomeLoc, Metrics>> results) {
        for (Pair<GenomeLoc, Metrics> r : results) {
            final GenomeLoc interval = r.getFirst();
            final Metrics metrics = r.getSecond();
            simpleReport.addRow(
                    interval.toString(),
                    metrics.gccontent(),
                    metrics.baseQual(),
                    metrics.mapQual(),
                    getPositionInTarget(interval),
                    cds.overlaps(interval),
                    interval.size()
            );
        }
        simpleReport.print(out);
        out.close();
    }

    /**
     * Parses a "contig:start-stop" interval string.
     *
     * @param s      one line from an interval file; empty lines are skipped
     * @param parser parser used to build the GenomeLoc
     * @return the parsed interval, or null for an empty line
     * @throws UserException.BadInput if the line is not in contig:start-stop form
     */
    private static GenomeLoc parseInterval(String s, GenomeLocParser parser) {
        if (s.isEmpty()) {
            return null;
        }
        final String[] first = s.split(":");
        if (first.length == 2) {
            final String[] second = first[1].split("\\-");
            return parser.createGenomeLoc(first[0], Integer.decode(second[0]), Integer.decode(second[1]));
        } else {
            throw new UserException.BadInput("Interval doesn't parse correctly: " + s);
        }
    }

    /**
     * Loads every interval from the given file into the sorted set (merging overlaps).
     *
     * @throws UserException.CouldNotReadInputFile if the file cannot be opened; previously
     *         the exception was only printed, silently leaving the set empty
     */
    private void parseFile(File file, GenomeLocSortedSet set, GenomeLocParser parser) {
        try {
            for (String s : new XReadLines(file)) {
                final GenomeLoc interval = parseInterval(s, parser);
                if (interval != null)
                    set.add(interval, true);
            }
        } catch (FileNotFoundException e) {
            throw new UserException.CouldNotReadInputFile(file, e);
        }
    }

    /**
     * @return the offset of the interval's start inside the last overlapping target
     *         interval, or 0 when the interval overlaps no target
     */
    private int getPositionInTarget(GenomeLoc interval) {
        final List<GenomeLoc> hits = target.getOverlapping(interval);
        int result = 0;
        for (GenomeLoc hit : hits) {
            result = interval.getStart() - hit.getStart(); // if there are multiple hits, we'll get the last one.
        }
        return result;
    }
}

View File

@ -76,7 +76,8 @@ public class IndelGenotypeLikelihoodsCalculationModel extends GenotypeLikelihood
private List<Allele> alleleList = new ArrayList<Allele>();
protected IndelGenotypeLikelihoodsCalculationModel(UnifiedArgumentCollection UAC, Logger logger) {
protected IndelGenotypeLikelihoodsCalculationModel(final UnifiedArgumentCollection UAC,
final Logger logger) {
super(UAC, logger);
pairModel = new PairHMMIndelErrorModel(UAC.INDEL_GAP_OPEN_PENALTY, UAC.INDEL_GAP_CONTINUATION_PENALTY,
UAC.OUTPUT_DEBUG_INDEL_INFO, UAC.pairHMM);
@ -85,10 +86,11 @@ public class IndelGenotypeLikelihoodsCalculationModel extends GenotypeLikelihood
ignoreSNPAllelesWhenGenotypingIndels = UAC.IGNORE_SNP_ALLELES;
}
protected static List<Allele> computeConsensusAlleles(ReferenceContext ref,
Map<String, AlignmentContext> contexts,
AlignmentContextUtils.ReadOrientation contextType,
GenomeLocParser locParser, UnifiedArgumentCollection UAC) {
protected static List<Allele> computeConsensusAlleles(final ReferenceContext ref,
final Map<String, AlignmentContext> contexts,
final AlignmentContextUtils.ReadOrientation contextType,
final GenomeLocParser locParser,
final UnifiedArgumentCollection UAC) {
ConsensusAlleleCounter counter = new ConsensusAlleleCounter(locParser, true, UAC.MIN_INDEL_COUNT_FOR_GENOTYPING, UAC.MIN_INDEL_FRACTION_PER_SAMPLE);
return counter.computeConsensusAlleles(ref, contexts, contextType);
}

View File

@ -147,9 +147,17 @@ public class SNPGenotypeLikelihoodsCalculationModel extends GenotypeLikelihoodsC
// if we only want variants, then we don't need to calculate genotype likelihoods
if ( UAC.OutputMode == UnifiedGenotyperEngine.OUTPUT_MODE.EMIT_VARIANTS_ONLY )
return builder.make();
// if user requires all PLs at all sites, add all possible alt alleles
else if (UAC.annotateAllSitesWithPLs) {
for ( final byte base : BaseUtils.BASES ) {
if ( base != refBase )
alleles.add(Allele.create(base));
}
}
// otherwise, choose any alternate allele (it doesn't really matter)
alleles.add(Allele.create(BaseUtils.baseIndexToSimpleBase(indexOfRefBase == 0 ? 1 : 0)));
else
// otherwise, choose any alternate allele (it doesn't really matter)
alleles.add(Allele.create(BaseUtils.baseIndexToSimpleBase(indexOfRefBase == 0 ? 1 : 0)));
}
}

View File

@ -52,6 +52,9 @@ import org.broadinstitute.sting.utils.pairhmm.PairHMM;
import org.broadinstitute.sting.utils.variant.GATKVariantContextUtils;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.util.Collections;
import java.util.List;
public class UnifiedArgumentCollection extends StandardCallerArgumentCollection {
@Argument(fullName = "genotype_likelihoods_model", shortName = "glm", doc = "Genotype likelihoods calculation model to employ -- SNP is the default option, while INDEL is also available for calling indels and BOTH is available for calling both together", required = false)
@ -82,7 +85,7 @@ public class UnifiedArgumentCollection extends StandardCallerArgumentCollection
* The PairHMM implementation to use for -glm INDEL genotype likelihood calculations. The various implementations balance a tradeoff of accuracy and runtime.
*/
@Argument(fullName = "pair_hmm_implementation", shortName = "pairHMM", doc = "The PairHMM implementation to use for -glm INDEL genotype likelihood calculations", required = false)
public PairHMM.HMM_IMPLEMENTATION pairHMM = PairHMM.HMM_IMPLEMENTATION.ORIGINAL;
public PairHMM.HMM_IMPLEMENTATION pairHMM = PairHMM.HMM_IMPLEMENTATION.LOGLESS_CACHING;
/**
* The minimum confidence needed in a given base for it to be used in variant calling. Note that the base quality of a base
@ -95,6 +98,18 @@ public class UnifiedArgumentCollection extends StandardCallerArgumentCollection
@Argument(fullName = "max_deletion_fraction", shortName = "deletions", doc = "Maximum fraction of reads with deletions spanning this locus for it to be callable [to disable, set to < 0 or > 1; default:0.05]", required = false)
public Double MAX_DELETION_FRACTION = 0.05;
/**
* Advanced, experimental argument: if SNP likelihood model is specified, and if EMIT_ALL_SITES output mode is set, when we set this argument then we will also emit PLs at all sites.
* This will give a measure of reference confidence and a measure of which alt alleles are more plausible (if any).
* WARNINGS:
* - This feature will inflate VCF file size considerably.
* - All SNP ALT alleles will be emitted with corresponding 10 PL values.
* - An error will be emitted if EMIT_ALL_SITES is not set, or if anything other than diploid SNP model is used
*/
@Advanced
@Argument(fullName = "allSitePLs", shortName = "allSitePLs", doc = "Annotate all sites with PLs", required = false)
public boolean annotateAllSitesWithPLs = false;
// indel-related arguments
/**
* A candidate indel is genotyped (and potentially called) if there are this number of reads with a consensus indel at a site.
@ -247,7 +262,7 @@ public class UnifiedArgumentCollection extends StandardCallerArgumentCollection
this.EXCLUDE_FILTERED_REFERENCE_SITES = uac.EXCLUDE_FILTERED_REFERENCE_SITES;
this.IGNORE_LANE_INFO = uac.IGNORE_LANE_INFO;
this.pairHMM = uac.pairHMM;
this.annotateAllSitesWithPLs = uac.annotateAllSitesWithPLs;
// todo- arguments to remove
this.IGNORE_SNP_ALLELES = uac.IGNORE_SNP_ALLELES;
}

View File

@ -83,6 +83,9 @@ public class UnifiedGenotyperEngine {
public static final double HUMAN_SNP_HETEROZYGOSITY = 1e-3;
public static final double HUMAN_INDEL_HETEROZYGOSITY = 1e-4;
private static final int SNP_MODEL = 0;
private static final int INDEL_MODEL = 1;
public enum OUTPUT_MODE {
/** produces calls only at variant sites */
EMIT_VARIANTS_ONLY,
@ -165,6 +168,13 @@ public class UnifiedGenotyperEngine {
filter.add(LOW_QUAL_FILTER_NAME);
determineGLModelsToUse();
// do argument checking
if (UAC.annotateAllSitesWithPLs) {
if (!modelsToUse.contains(GenotypeLikelihoodsCalculationModel.Model.SNP))
throw new IllegalArgumentException("Invalid genotype likelihood model specification: Only diploid SNP model can be used in conjunction with option allSitePLs");
}
}
/**
@ -436,7 +446,8 @@ public class UnifiedGenotyperEngine {
bestGuessIsRef = false;
}
// if in GENOTYPE_GIVEN_ALLELES mode, we still want to allow the use of a poor allele
else if ( UAC.GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) {
else if ( UAC.GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ||
UAC.annotateAllSitesWithPLs) {
myAlleles.add(alternateAllele);
alleleCountsofMLE.add(AFresult.getAlleleCountAtMLE(alternateAllele));
}
@ -446,7 +457,7 @@ public class UnifiedGenotyperEngine {
// note the math.abs is necessary because -10 * 0.0 => -0.0 which isn't nice
final double phredScaledConfidence =
Math.abs(! bestGuessIsRef || UAC.GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES
Math.abs(! bestGuessIsRef || UAC.GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES || UAC.annotateAllSitesWithPLs
? -10 * AFresult.getLog10PosteriorOfAFEq0()
: -10 * AFresult.getLog10PosteriorOfAFGT0());
@ -540,11 +551,6 @@ public class UnifiedGenotyperEngine {
builder.attributes(attributes);
VariantContext vcCall = builder.make();
// if we are subsetting alleles (either because there were too many or because some were not polymorphic)
// then we may need to trim the alleles (because the original VariantContext may have had to pad at the end).
if ( myAlleles.size() != vc.getAlleles().size() && !limitedContext ) // limitedContext callers need to handle allele trimming on their own to keep their perReadAlleleLikelihoodMap alleles in sync
vcCall = GATKVariantContextUtils.reverseTrimAlleles(vcCall);
if ( annotationEngine != null && !limitedContext ) { // limitedContext callers need to handle annotations on their own by calling their own annotationEngine
// Note: we want to use the *unfiltered* and *unBAQed* context for the annotations
final ReadBackedPileup pileup = rawContext.getBasePileup();
@ -553,6 +559,11 @@ public class UnifiedGenotyperEngine {
vcCall = annotationEngine.annotateContext(tracker, refContext, stratifiedContexts, vcCall, perReadAlleleLikelihoodMap);
}
// if we are subsetting alleles (either because there were too many or because some were not polymorphic)
// then we may need to trim the alleles (because the original VariantContext may have had to pad at the end).
if ( myAlleles.size() != vc.getAlleles().size() && !limitedContext ) // limitedContext callers need to handle allele trimming on their own to keep their perReadAlleleLikelihoodMap alleles in sync
vcCall = GATKVariantContextUtils.reverseTrimAlleles(vcCall);
return new VariantCallContext(vcCall, confidentlyCalled(phredScaledConfidence, PoFGT0));
}
@ -693,13 +704,13 @@ public class UnifiedGenotyperEngine {
}
private void determineGLModelsToUse() {
String modelPrefix = "";
if ( !UAC.GLmodel.name().contains(GPSTRING) && UAC.samplePloidy != GATKVariantContextUtils.DEFAULT_PLOIDY )
modelPrefix = GPSTRING;
if ( UAC.GLmodel.name().toUpperCase().contains("BOTH") ) {
modelPrefix += UAC.GLmodel.name().toUpperCase().replaceAll("BOTH","");
// GGA mode => must initialize both the SNP and indel models
if ( UAC.GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ||
UAC.GLmodel.name().toUpperCase().contains("BOTH") ) {
modelsToUse.add(GenotypeLikelihoodsCalculationModel.Model.valueOf(modelPrefix+"SNP"));
modelsToUse.add(GenotypeLikelihoodsCalculationModel.Model.valueOf(modelPrefix+"INDEL"));
}
@ -712,31 +723,24 @@ public class UnifiedGenotyperEngine {
private List<GenotypeLikelihoodsCalculationModel.Model> getGLModelsToUse(final RefMetaDataTracker tracker,
final ReferenceContext refContext,
final AlignmentContext rawContext) {
if ( UAC.GenotypingMode != GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES )
return modelsToUse;
if ( modelsToUse.size() != 2 )
throw new IllegalStateException("GGA mode assumes that we have initialized both the SNP and indel models but found " + modelsToUse);
// if we're genotyping given alleles then we need to choose the model corresponding to the variant type requested
final List<GenotypeLikelihoodsCalculationModel.Model> GGAmodel = new ArrayList<GenotypeLikelihoodsCalculationModel.Model>(1);
final VariantContext vcInput = getVCFromAllelesRod(tracker, refContext, rawContext.getLocation(), false, logger, UAC.alleles);
if ( vcInput == null )
return GGAmodel; // no work to be done
if ( vcInput.isSNP() ) {
// use the SNP model unless the user chose INDEL mode only
if ( modelsToUse.size() == 2 || modelsToUse.get(0).name().endsWith("SNP") )
GGAmodel.add(modelsToUse.get(0));
if ( vcInput == null ) {
return Collections.emptyList(); // no work to be done
} else if ( vcInput.isSNP() ) {
return Collections.singletonList(modelsToUse.get(SNP_MODEL));
} else if ( vcInput.isIndel() || vcInput.isMixed() ) {
return Collections.singletonList(modelsToUse.get(INDEL_MODEL));
} else {
return Collections.emptyList(); // No support for other types yet
}
else if ( vcInput.isIndel() || vcInput.isMixed() ) {
// use the INDEL model unless the user chose SNP mode only
if ( modelsToUse.size() == 2 )
GGAmodel.add(modelsToUse.get(1));
else if ( modelsToUse.get(0).name().endsWith("INDEL") )
GGAmodel.add(modelsToUse.get(0));
}
// No support for other types yet
return GGAmodel;
}
/**

View File

@ -106,7 +106,7 @@ public abstract class DiploidExactAFCalc extends ExactAFCalc {
alleles.add(vc.getReference());
alleles.addAll(chooseMostLikelyAlternateAlleles(vc, getMaxAltAlleles()));
builder.alleles(alleles);
builder.genotypes(GATKVariantContextUtils.subsetDiploidAlleles(vc, alleles, false));
builder.genotypes(GATKVariantContextUtils.subsetDiploidAlleles(vc, alleles, GATKVariantContextUtils.GenotypeAssignmentMethod.SET_TO_NO_CALL));
return builder.make();
} else {
return vc;
@ -352,6 +352,9 @@ public abstract class DiploidExactAFCalc extends ExactAFCalc {
final List<Allele> allelesToUse,
final boolean assignGenotypes,
final int ploidy) {
return GATKVariantContextUtils.subsetDiploidAlleles(vc, allelesToUse, assignGenotypes);
return allelesToUse.size() == 1
? GATKVariantContextUtils.subsetToRefOnly(vc, ploidy)
: GATKVariantContextUtils.subsetDiploidAlleles(vc, allelesToUse,
assignGenotypes ? GATKVariantContextUtils.GenotypeAssignmentMethod.USE_PLS_TO_ASSIGN : GATKVariantContextUtils.GenotypeAssignmentMethod.SET_TO_NO_CALL);
}
}

View File

@ -0,0 +1,142 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.util.LinkedList;
import java.util.List;
import java.util.TreeSet;
/**
 * Trims an active region down to the span needed to genotype the variants discovered
 * across the haplotypes within the region.
 *
 * User: depristo
 * Date: 4/27/13
 * Time: 2:10 PM
 */
class ActiveRegionTrimmer {
    private final static Logger logger = Logger.getLogger(ActiveRegionTrimmer.class);

    private final boolean logTrimming;
    private final int snpPadding, nonSnpPadding, maxDistanceInExtensionForGenotyping;
    private final GenomeLocParser parser;

    /**
     * Create a new ActiveRegionTrimmer
     *
     * @param logTrimming should we log our trimming events?
     * @param snpPadding how much bp context should we ensure around snps?
     * @param nonSnpPadding how much bp context should we ensure around anything not a snp?
     * @param maxDistanceInExtensionForGenotyping the max extent we are will to go into the extended region of the
     *                                            origin active region in order to properly genotype events in the
     *                                            non-extended active region?
     * @param parser a genome loc parser so we can create genome locs
     */
    ActiveRegionTrimmer(boolean logTrimming, int snpPadding, int nonSnpPadding, int maxDistanceInExtensionForGenotyping, GenomeLocParser parser) {
        checkNonNegative("snpPadding", snpPadding);
        checkNonNegative("nonSnpPadding", nonSnpPadding);
        checkNonNegative("maxDistanceInExtensionForGenotyping", maxDistanceInExtensionForGenotyping);
        if ( parser == null ) throw new IllegalArgumentException("parser cannot be null");
        this.logTrimming = logTrimming;
        this.snpPadding = snpPadding;
        this.nonSnpPadding = nonSnpPadding;
        this.maxDistanceInExtensionForGenotyping = maxDistanceInExtensionForGenotyping;
        this.parser = parser;
    }

    // rejects negative construction arguments with a descriptive message
    private static void checkNonNegative(final String name, final int value) {
        if ( value < 0 )
            throw new IllegalArgumentException(name + " must be >= 0 but got " + value);
    }

    /**
     * Trim down the active region to a region large enough to properly genotype the events found within the active
     * region span, excluding all variants that only occur within its extended span.
     *
     * This function merely creates the region, but it doesn't populate the reads back into the region.
     *
     * @param region our full active region
     * @param allVariantsWithinExtendedRegion all of the variants found in the entire region, sorted by their start position
     * @return a new ActiveRegion trimmed down to just what's needed for genotyping, or null if we couldn't do this successfully
     */
    public ActiveRegion trimRegion(final ActiveRegion region, final TreeSet<VariantContext> allVariantsWithinExtendedRegion) {
        // no variants at all => nothing to trim to
        if ( allVariantsWithinExtendedRegion.isEmpty() )
            return null;

        final List<VariantContext> overlappingVariants = new LinkedList<VariantContext>();
        GenomeLoc variantSpan = null;
        int padding = snpPadding;

        for ( final VariantContext vc : allVariantsWithinExtendedRegion ) {
            final GenomeLoc variantLoc = parser.createGenomeLoc(vc);
            if ( ! region.getLocation().overlapsP(variantLoc) )
                continue;                       // lives only in the extended span; ignore it
            if ( ! vc.isSNP() )
                padding = nonSnpPadding;        // any non-SNP event requires the bigger padding
            variantSpan = variantSpan == null ? variantLoc : variantSpan.endpointSpan(variantLoc);
            overlappingVariants.add(vc);
        }

        // every variant fell outside the region's unextended location => nothing usable
        if ( variantSpan == null )
            return null;

        final GenomeLoc maxSpan = parser.createPaddedGenomeLoc(region.getLocation(), maxDistanceInExtensionForGenotyping);
        final GenomeLoc idealSpan = parser.createPaddedGenomeLoc(variantSpan, padding);
        final GenomeLoc finalSpan = maxSpan.intersect(idealSpan);
        final ActiveRegion trimmedRegion = region.trim(finalSpan);

        if ( logTrimming ) {
            logger.info("events     : " + overlappingVariants);
            logger.info("trimLoc    : " + variantSpan);
            logger.info("pad        : " + padding);
            logger.info("idealSpan  : " + idealSpan);
            logger.info("maxSpan    : " + maxSpan);
            logger.info("finalSpan  : " + finalSpan);
            logger.info("regionSpan : " + trimmedRegion.getExtendedLoc() + " size is " + trimmedRegion.getExtendedLoc().size());
        }

        return trimmedRegion;
    }
}

View File

@ -46,102 +46,55 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.*;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.DeBruijnGraph;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.SeqGraph;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.smithwaterman.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.sting.utils.smithwaterman.SWParameterSet;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.io.File;
import java.util.*;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
/**
* Created by IntelliJ IDEA.
* DeBruijn assembler for the HaplotypeCaller
*
* User: ebanks, rpoplin
* Date: Mar 14, 2011
*/
public class DeBruijnAssembler extends LocalAssemblyEngine {
private final static Logger logger = Logger.getLogger(DeBruijnAssembler.class);
private static final int KMER_OVERLAP = 5; // the additional size of a valid chunk of sequence, used to string together k-mers
// TODO -- this number is very low, and limits our ability to explore low-frequency variants. It should
// TODO -- be increased to a large number of eliminated altogether when moving to the bubble caller where
// TODO -- we are no longer considering a combinatorial number of haplotypes as the number of bubbles increases
private static final int NUM_BEST_PATHS_PER_KMER_GRAPH = 25;
private final static int NUM_PATHS_PER_GRAPH = 25;
private static final int KMER_OVERLAP = 5; // the additional size of a valid chunk of sequence, used to string together k-mers
private static final int GRAPH_KMER_STEP = 6;
private static final int GGA_MODE_ARTIFICIAL_COUNTS = 1000;
private final boolean debug;
private final boolean debugGraphTransformations;
private final int minKmer;
private final boolean allowCyclesInKmerGraphToGeneratePaths;
private final int onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms;
protected DeBruijnAssembler() {
this(false, -1, 11, false);
this(25, -1);
}
public DeBruijnAssembler(final boolean debug,
final int debugGraphTransformations,
final int minKmer,
final boolean allowCyclesInKmerGraphToGeneratePaths) {
super();
this.debug = debug;
this.debugGraphTransformations = debugGraphTransformations > 0;
this.onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms = debugGraphTransformations;
public DeBruijnAssembler(final int minKmer, final int onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms) {
super(NUM_PATHS_PER_GRAPH);
this.minKmer = minKmer;
this.allowCyclesInKmerGraphToGeneratePaths = allowCyclesInKmerGraphToGeneratePaths;
this.onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms = onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms;
}
/**
 * Main entry point into the assembly engine. Builds a set of deBruijn graphs from the provided
 * reference sequence plus the reads in the active region, and returns the resulting haplotypes.
 *
 * @param activeRegion ActiveRegion object holding the reads which are to be used during assembly
 * @param refHaplotype reference haplotype object
 * @param fullReferenceWithPadding byte array holding the reference sequence with padding
 * @param refLoc GenomeLoc object corresponding to the reference sequence with padding
 * @param activeAllelesToGenotype the alleles to inject into the haplotypes during GGA mode
 * @return a non-empty list of all the haplotypes that are produced during assembly
 */
@Ensures({"result.contains(refHaplotype)"})
public List<Haplotype> runLocalAssembly( final ActiveRegion activeRegion, final Haplotype refHaplotype, final byte[] fullReferenceWithPadding, final GenomeLoc refLoc, final List<VariantContext> activeAllelesToGenotype ) {
    // validate all inputs up front; check order is significant for which error is reported first
    if( activeRegion == null ) {
        throw new IllegalArgumentException("Assembly engine cannot be used with a null ActiveRegion.");
    }
    if( refHaplotype == null ) {
        throw new IllegalArgumentException("Reference haplotype cannot be null.");
    }
    if( fullReferenceWithPadding.length != refLoc.size() ) {
        throw new IllegalArgumentException("Reference bases and reference loc must be the same size.");
    }
    if( pruneFactor < 0 ) {
        throw new IllegalArgumentException("Pruning factor cannot be negative");
    }

    // build the deBruijn graphs from the reads and the reference haplotype
    final List<SeqGraph> assembledGraphs = createDeBruijnGraphs( activeRegion.getReads(), refHaplotype );

    // emit the graphs when graph-writing debug output was requested
    if( graphWriter != null ) {
        printGraphs(assembledGraphs);
    }

    // convert the best paths through each graph into haplotypes
    return findBestPaths( assembledGraphs, refHaplotype, fullReferenceWithPadding, refLoc, activeAllelesToGenotype, activeRegion.getExtendedLoc() );
}
@Requires({"reads != null", "refHaplotype != null"})
protected List<SeqGraph> createDeBruijnGraphs( final List<GATKSAMRecord> reads, final Haplotype refHaplotype ) {
final List<SeqGraph> graphs = new LinkedList<SeqGraph>();
@Override
protected List<SeqGraph> assemble(final List<GATKSAMRecord> reads, final Haplotype refHaplotype, final List<Haplotype> activeAlleleHaplotypes ) {
final List<SeqGraph> graphs = new LinkedList<>();
final int maxKmer = ReadUtils.getMaxReadLength(reads) - KMER_OVERLAP - 1;
if( maxKmer < minKmer) {
@ -154,7 +107,7 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
continue;
if ( debug ) logger.info("Creating de Bruijn graph for " + kmer + " kmer using " + reads.size() + " reads");
DeBruijnGraph graph = createGraphFromSequences( reads, kmer, refHaplotype);
DeBruijnGraph graph = createGraphFromSequences(reads, kmer, refHaplotype, activeAlleleHaplotypes);
if( graph != null ) { // graphs that fail during creation ( for example, because there are cycles in the reference graph ) will show up here as a null graph object
// do a series of steps to clean up the raw assembly graph to make it analysis-ready
if ( debugGraphTransformations ) graph.printGraph(new File("unpruned.dot"), pruneFactor);
@ -165,10 +118,9 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
" future subsystem will actually go and error correct the reads");
}
final SeqGraph seqGraph = toSeqGraph(graph);
final SeqGraph seqGraph = cleanupSeqGraph(graph.convertToSequenceGraph());
if ( seqGraph != null ) { // if the graph contains interesting variation from the reference
sanityCheckReferenceGraph(seqGraph, refHaplotype);
graphs.add(seqGraph);
if ( debugGraphTransformations ) // we only want to use one graph size
@ -181,71 +133,8 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
return graphs;
}
/**
 * Convert a raw deBruijn graph into a cleaned-up, analysis-ready sequence graph.
 *
 * The transformation order below is deliberate and must not change:
 * zipping must happen before pruning (or pruning is too aggressive), and pruning must
 * happen before simplifyGraph (which adds 0-weight edges to maintain connectivity).
 *
 * @param deBruijnGraph the raw assembly graph to convert
 * @return the cleaned sequence graph, or null if the graph degenerated (e.g. the reference
 *         source/sink can no longer be identified, or the assembly collapsed back to the reference)
 */
private SeqGraph toSeqGraph(final DeBruijnGraph deBruijnGraph) {
    final SeqGraph graph = deBruijnGraph.convertToSequenceGraph();
    if ( debugGraphTransformations ) graph.printGraph(new File("sequenceGraph.1.dot"), pruneFactor);

    // TODO -- we need to come up with a consistent pruning algorithm. The current pruning algorithm
    // TODO -- works well but it doesn't differentiate between an isolated chain that doesn't connect
    // TODO -- to anything from one that's actually has good support along the chain but just happens
    // TODO -- to have a connection in the middle that has weight of < pruneFactor. Ultimately
    // TODO -- the pruning algorithm really should be an error correction algorithm that knows more
    // TODO -- about the structure of the data and can differentiate between an infrequent path but
    // TODO -- without evidence against it (such as occurs when a region is hard to get any reads through)
    // TODO -- from a error with lots of weight going along another similar path

    // step 1: zip up linear chains -- required before pruning, or pruneGraph is too aggressive
    graph.zipLinearChains();
    if ( debugGraphTransformations ) graph.printGraph(new File("sequenceGraph.2.zipped.dot"), pruneFactor);

    // step 2: prune low-weight edges, then drop vertices disconnected from the reference chain.
    // IMPORTANT: this must occur before simplifyGraph, which adds 0-weight edges for connectivity.
    graph.pruneGraph(pruneFactor);
    graph.removeVerticesNotConnectedToRefRegardlessOfEdgeDirection();
    if ( debugGraphTransformations ) graph.printGraph(new File("sequenceGraph.3.pruned.dot"), pruneFactor);

    // step 3: merge/simplify the surviving structure
    graph.simplifyGraph();
    if ( debugGraphTransformations ) graph.printGraph(new File("sequenceGraph.4.merged.dot"), pruneFactor);

    // The graph has degenerated in some way, so the reference source and/or sink cannot be id'd. Can
    // happen in cases where for example the reference somehow manages to acquire a cycle, or
    // where the entire assembly collapses back into the reference sequence.
    if ( graph.getReferenceSourceVertex() == null || graph.getReferenceSinkVertex() == null ) {
        return null;
    }

    graph.removePathsNotConnectedToRef();
    graph.simplifyGraph();

    if ( graph.vertexSet().size() == 1 ) {
        // we've perfectly assembled into a single reference haplotype; add an empty seq vertex
        // so downstream code that expects at least one edge doesn't blow up.
        // TODO -- ref properties should really be on the vertices, not the graph itself
        final SeqVertex onlyVertex = graph.vertexSet().iterator().next();
        final SeqVertex emptyVertex = new SeqVertex("");
        graph.addVertex(emptyVertex);
        graph.addEdge(onlyVertex, emptyVertex, new BaseEdge(true, 0));
    }

    if ( debugGraphTransformations ) graph.printGraph(new File("sequenceGraph.5.final.dot"), pruneFactor);
    return graph;
}
/**
 * Sanity-check that a reference-seeded graph is well formed: it must have both a reference
 * source and a reference sink vertex, and the bases along the reference path through the graph
 * must exactly match the reference haplotype's bases.
 *
 * @param graph the assembly graph to check
 * @param refHaplotype the reference haplotype the graph was seeded with
 * @throws IllegalStateException if any of the invariants above is violated
 */
protected <T extends BaseVertex> void sanityCheckReferenceGraph(final BaseGraph<T> graph, final Haplotype refHaplotype) {
    if( graph.getReferenceSourceVertex() == null ) {
        throw new IllegalStateException("All reference graphs must have a reference source vertex.");
    }
    if( graph.getReferenceSinkVertex() == null ) {
        throw new IllegalStateException("All reference graphs must have a reference sink vertex.");
    }
    // compute the reference path bases once (the original computed them twice: in the comparison and again in the message)
    final byte[] refBytesFromGraph = graph.getReferenceBytes(graph.getReferenceSourceVertex(), graph.getReferenceSinkVertex(), true, true);
    if( !Arrays.equals(refBytesFromGraph, refHaplotype.getBases()) ) {
        throw new IllegalStateException("Mismatch between the reference haplotype and the reference assembly graph path." +
                " graph = " + new String(refBytesFromGraph) +
                " haplotype = " + new String(refHaplotype.getBases())
        );
    }
}
@Requires({"reads != null", "kmerLength > 0", "refHaplotype != null"})
protected DeBruijnGraph createGraphFromSequences( final List<GATKSAMRecord> reads, final int kmerLength, final Haplotype refHaplotype ) {
protected DeBruijnGraph createGraphFromSequences( final List<GATKSAMRecord> reads, final int kmerLength, final Haplotype refHaplotype, final List<Haplotype> activeAlleleHaplotypes ) {
final DeBruijnGraph graph = new DeBruijnGraph(kmerLength);
final DeBruijnGraphBuilder builder = new DeBruijnGraphBuilder(graph);
@ -254,6 +143,11 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
// something went wrong, so abort right now with a null graph
return null;
// add the artificial GGA haplotypes to the graph
if ( ! addGGAKmersToGraph(builder, activeAlleleHaplotypes) )
// something went wrong, so abort right now with a null graph
return null;
// now go through the graph already seeded with the reference sequence and add the read kmers to it
if ( ! addReadKmersToGraph(builder, reads) )
// some problem was detected adding the reads to the graph, return null to indicate we failed
@ -263,6 +157,28 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
return graph;
}
/**
 * Add the high-quality kmers from the artificial GGA haplotypes to the graph.
 *
 * Each injected haplotype contributes kmer pairs with a large artificial count
 * (GGA_MODE_ARTIFICIAL_COUNTS) so they survive pruning.
 *
 * @param builder a debruijn graph builder to add the read kmers to
 * @param activeAlleleHaplotypes a list of haplotypes to add to the graph for GGA mode
 * @return true if we successfully added the read kmers to the graph without corrupting it in some way
 */
protected boolean addGGAKmersToGraph(final DeBruijnGraphBuilder builder, final List<Haplotype> activeAlleleHaplotypes) {
    final int kmerSize = builder.getKmerSize();
    for( final Haplotype hap : activeAlleleHaplotypes ) {
        final byte[] bases = hap.getBases();
        final int numStarts = hap.length() - kmerSize;
        for( int offset = 0; offset < numStarts; offset++ ) {
            builder.addKmerPairFromSeqToGraph( bases, offset, GGA_MODE_ARTIFICIAL_COUNTS );
        }
    }
    // always returns true now, but it's possible that we'd add kmers and decide we don't like the graph in some way
    return true;
}
/**
* Add the high-quality kmers from the reads to the graph
*
@ -344,290 +260,10 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
return true;
}
/**
 * Write the assembled graphs to the graph writer as a single DOT "digraph" document.
 * In graph-transformation debug mode only the first (smallest-kmer) graph below the size
 * cutoff is written.
 *
 * @param graphs the sequence graphs to print
 */
protected void printGraphs(final List<SeqGraph> graphs) {
    final int maxKmerSizeToWrite = 50;
    graphWriter.println("digraph assemblyGraphs {");
    for( final SeqGraph graph : graphs ) {
        final boolean tooBigForDebugOutput = debugGraphTransformations && graph.getKmerSize() >= maxKmerSizeToWrite;
        if ( tooBigForDebugOutput ) {
            logger.info("Skipping writing of graph with kmersize " + graph.getKmerSize());
            continue;
        }
        graph.printGraph(graphWriter, false, pruneFactor);
        if ( debugGraphTransformations ) {
            break; // in debug mode we only want to emit a single graph
        }
    }
    graphWriter.println("}");
}
/**
 * Find the K best paths through each assembly graph and convert them into candidate haplotypes.
 *
 * The reference haplotype is always added first so it is guaranteed to be in the result. In GGA
 * mode the requested alleles are additionally injected both into the reference haplotype and into
 * every assembled haplotype that does not already carry an event at the requested position.
 *
 * @param graphs the cleaned sequence graphs to extract paths from
 * @param refHaplotype the reference haplotype for this active region
 * @param refWithPadding the padded reference bases backing refLoc
 * @param refLoc the genome loc of the padded reference
 * @param activeAllelesToGenotype alleles to force into the haplotypes (GGA mode); empty otherwise
 * @param activeRegionWindow the extended active region window the haplotypes span
 * @return the list of candidate haplotypes, always containing refHaplotype
 */
@Requires({"refWithPadding.length > refHaplotype.getBases().length", "refLoc.containsP(activeRegionWindow)"})
@Ensures({"result.contains(refHaplotype)"})
private List<Haplotype> findBestPaths( final List<SeqGraph> graphs, final Haplotype refHaplotype, final byte[] refWithPadding, final GenomeLoc refLoc, final List<VariantContext> activeAllelesToGenotype, final GenomeLoc activeRegionWindow ) {
    // add the reference haplotype separately from all the others to ensure that it is present in the list of haplotypes
    // TODO -- this use of an array with contains lower may be a performance problem returning in an O(N^2) algorithm
    final List<Haplotype> returnHaplotypes = new ArrayList<Haplotype>();
    refHaplotype.setAlignmentStartHapwrtRef(activeRegionWindow.getStart() - refLoc.getStart());
    // the reference haplotype trivially aligns to itself as a single match block
    final Cigar c = new Cigar();
    c.add(new CigarElement(refHaplotype.getBases().length, CigarOperator.M));
    refHaplotype.setCigar(c);
    returnHaplotypes.add( refHaplotype );

    // start/stop of the active region expressed as offsets into refWithPadding
    final int activeRegionStart = refHaplotype.getAlignmentStartHapwrtRef();
    final int activeRegionStop = refHaplotype.getAlignmentStartHapwrtRef() + refHaplotype.getCigar().getReferenceLength();

    // for GGA mode, add the desired allele into the haplotype
    for( final VariantContext compVC : activeAllelesToGenotype ) {
        for( final Allele compAltAllele : compVC.getAlternateAlleles() ) {
            final Haplotype insertedRefHaplotype = refHaplotype.insertAllele(compVC.getReference(), compAltAllele, activeRegionStart + compVC.getStart() - activeRegionWindow.getStart(), compVC.getStart());
            addHaplotypeForGGA( insertedRefHaplotype, refWithPadding, returnHaplotypes, activeRegionStart, activeRegionStop, true );
        }
    }

    for( final SeqGraph graph : graphs ) {
        final SeqVertex source = graph.getReferenceSourceVertex();
        final SeqVertex sink = graph.getReferenceSinkVertex();
        // toSeqGraph already filtered degenerate graphs, so both endpoints must exist here
        if ( source == null || sink == null ) throw new IllegalArgumentException("Both source and sink cannot be null but got " + source + " and sink " + sink + " for graph "+ graph);

        final KBestPaths<SeqVertex> pathFinder = new KBestPaths<SeqVertex>(allowCyclesInKmerGraphToGeneratePaths);
        for ( final Path<SeqVertex> path : pathFinder.getKBestPaths(graph, NUM_BEST_PATHS_PER_KMER_GRAPH, source, sink) ) {
            // logger.info("Found path " + path);
            Haplotype h = new Haplotype( path.getBases() );
            if( !returnHaplotypes.contains(h) ) {
                final Cigar cigar = path.calculateCigar();

                if( cigar.isEmpty() ) {
                    throw new IllegalStateException("Smith-Waterman alignment failure. Cigar = " + cigar + " with reference length " + cigar.getReferenceLength() + " but expecting reference length of " + refHaplotype.getCigar().getReferenceLength());
                } else if ( pathIsTooDivergentFromReference(cigar) || cigar.getReferenceLength() < 60 ) { // N cigar elements means that a bubble was too divergent from the reference so skip over this path
                    continue;
                } else if( cigar.getReferenceLength() != refHaplotype.getCigar().getReferenceLength() ) { // SW failure
                    throw new IllegalStateException("Smith-Waterman alignment failure. Cigar = " + cigar + " with reference length " + cigar.getReferenceLength() + " but expecting reference length of " + refHaplotype.getCigar().getReferenceLength());
                }

                h.setCigar(cigar);

                // extend partial haplotypes which are anchored in the reference to include the full active region
                h = extendPartialHaplotype(h, activeRegionStart, refWithPadding);

                // left-align the indels; a haplotype whose cigar can't be left-aligned to the full reference length is dropped
                final Cigar leftAlignedCigar = leftAlignCigarSequentially(AlignmentUtils.consolidateCigar(h.getCigar()), refWithPadding, h.getBases(), activeRegionStart, 0);
                if( leftAlignedCigar.getReferenceLength() != refHaplotype.getCigar().getReferenceLength() ) { // left alignment failure
                    continue;
                }

                // re-check for duplicates: extension/left-alignment may have collapsed this onto an existing haplotype
                if( !returnHaplotypes.contains(h) ) {
                    h.setAlignmentStartHapwrtRef(activeRegionStart);
                    h.setCigar(leftAlignedCigar);
                    h.setScore(path.getScore());
                    returnHaplotypes.add(h);

                    if ( debug )
                        logger.info("Adding haplotype " + h.getCigar() + " from debruijn graph with kmer " + graph.getKmerSize());

                    // for GGA mode, add the desired allele into the haplotype if it isn't already present
                    if( !activeAllelesToGenotype.isEmpty() ) {
                        final Map<Integer,VariantContext> eventMap = GenotypingEngine.generateVCsFromAlignment( h, refWithPadding, refLoc, "HCassembly" ); // BUGBUG: need to put this function in a shared place
                        for( final VariantContext compVC : activeAllelesToGenotype ) { // for GGA mode, add the desired allele into the haplotype if it isn't already present
                            final VariantContext vcOnHaplotype = eventMap.get(compVC.getStart());

                            // This if statement used to additionally have:
                            //      "|| !vcOnHaplotype.hasSameAllelesAs(compVC)"
                            //  but that can lead to problems downstream when e.g. you are injecting a 1bp deletion onto
                            //  a haplotype that already contains a 1bp insertion (so practically it is reference but
                            //  falls into the bin for the 1bp deletion because we keep track of the artificial alleles).
                            if( vcOnHaplotype == null ) {
                                for( final Allele compAltAllele : compVC.getAlternateAlleles() ) {
                                    addHaplotypeForGGA( h.insertAllele(compVC.getReference(), compAltAllele, activeRegionStart + compVC.getStart() - activeRegionWindow.getStart(), compVC.getStart()), refWithPadding, returnHaplotypes, activeRegionStart, activeRegionStop, false );
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // add genome locs to the haplotypes
    for ( final Haplotype h : returnHaplotypes ) h.setGenomeLocation(activeRegionWindow);

    // NOTE(review): this condition compares returnHaplotypes.size() to itself and can never be true,
    // so the log line below is dead code -- this looks like merge damage; presumably the comparison
    // was meant to be against the number of *possible* haplotype combinations. Confirm against history.
    if ( returnHaplotypes.size() < returnHaplotypes.size() )
        logger.info("Found " + returnHaplotypes.size() + " candidate haplotypes of " + returnHaplotypes.size() + " possible combinations to evaluate every read against at " + refLoc);

    if( debug ) {
        if( returnHaplotypes.size() > 1 ) {
            // NOTE(review): both counts here print the same value; second one presumably intended
            // to be the number of possible combinations -- same suspected merge damage as above.
            logger.info("Found " + returnHaplotypes.size() + " candidate haplotypes of " + returnHaplotypes.size() + " possible combinations to evaluate every read against.");
        } else {
            logger.info("Found only the reference haplotype in the assembly graph.");
        }
        for( final Haplotype h : returnHaplotypes ) {
            logger.info( h.toString() );
            logger.info( "> Cigar = " + h.getCigar() + " : " + h.getCigar().getReferenceLength() + " score " + h.getScore() );
        }
    }

    return returnHaplotypes;
}
/**
 * Extend partial haplotypes which are anchored in the reference to include the full active region.
 *
 * Walks the haplotype's cigar; a deletion at either end of the cigar is filled in with the
 * corresponding reference bases (and its cigar element becomes a match), while interior
 * elements are passed through unchanged.
 *
 * @param haplotype the haplotype to extend
 * @param activeRegionStart the place where the active region starts in the ref byte array
 * @param refWithPadding the full reference byte array with padding which encompasses the active region
 * @return a haplotype fully extended to encompass the active region
 */
@Requires({"haplotype != null", "activeRegionStart >= 0", "refWithPadding != null", "refWithPadding.length > 0"})
@Ensures({"result != null", "result.getCigar() != null"})
private Haplotype extendPartialHaplotype( final Haplotype haplotype, final int activeRegionStart, final byte[] refWithPadding ) {
    final Cigar originalCigar = haplotype.getCigar();
    final Cigar extendedCigar = new Cigar();
    byte[] bases = haplotype.getBases();
    int refOffset = activeRegionStart; // current position in refWithPadding
    int hapOffset = 0;                 // current position in the haplotype bases
    final int numElements = originalCigar.getCigarElements().size();

    for( int idx = 0; idx < numElements; idx++ ) {
        final CigarElement element = originalCigar.getCigarElement(idx);
        final CigarOperator op = element.getOperator();
        final int len = element.getLength();

        if( op == CigarOperator.M ) {
            // match consumes both reference and haplotype
            refOffset += len;
            hapOffset += len;
            extendedCigar.add(element);
        } else if( op == CigarOperator.I ) {
            // insertion consumes haplotype bases only
            hapOffset += len;
            extendedCigar.add(element);
        } else if( op == CigarOperator.D ) {
            if( idx == 0 || idx == numElements - 1 ) {
                // leading/trailing deletion: splice the deleted reference bases back into
                // the haplotype and record them as a match instead
                final byte[] prefix = Arrays.copyOfRange(bases, 0, hapOffset);
                final byte[] refChunk = Arrays.copyOfRange(refWithPadding, refOffset, refOffset + len);
                final byte[] suffix = Arrays.copyOfRange(bases, hapOffset, bases.length);
                bases = ArrayUtils.addAll(prefix, ArrayUtils.addAll(refChunk, suffix));
                hapOffset += len;
                refOffset += len;
                extendedCigar.add(new CigarElement(len, CigarOperator.M));
            } else {
                // interior deletion: keep as-is, consumes reference only
                refOffset += len;
                extendedCigar.add(element);
            }
        } else {
            throw new IllegalStateException("Unsupported cigar operator detected: " + op);
        }
    }

    final Haplotype extendedHaplotype = new Haplotype(bases, haplotype.isReference());
    extendedHaplotype.setCigar( extendedCigar );
    return extendedHaplotype;
}
/**
 * We use CigarOperator.N as the signal that an incomplete or too divergent bubble was found during bubble traversal.
 *
 * @param c the cigar to test
 * @return true if we should skip over this path
 */
@Requires("c != null")
private boolean pathIsTooDivergentFromReference( final Cigar c ) {
    boolean containsN = false;
    for( final CigarElement element : c.getCigarElements() ) {
        if( element.getOperator() == CigarOperator.N ) { // enum identity comparison is equivalent to equals()
            containsN = true;
            break;
        }
    }
    return containsN;
}
/**
 * Left align the given cigar sequentially. This is needed because AlignmentUtils doesn't accept cigars with more than one indel in them.
 * This is a target of future work to incorporate and generalize into AlignmentUtils for use by others.
 *
 * The cigar is processed in runs: elements accumulate in a pending buffer, and each time an
 * indel (I or D) is reached the whole pending run is left-aligned as a unit and flushed.
 *
 * @param cigar the cigar to left align
 * @param refSeq the reference byte array
 * @param readSeq the read byte array
 * @param refIndex 0-based alignment start position on ref
 * @param readIndex 0-based alignment start position on read
 * @return the left-aligned cigar
 */
@Ensures({"cigar != null", "refSeq != null", "readSeq != null", "refIndex >= 0", "readIndex >= 0"})
protected Cigar leftAlignCigarSequentially(final Cigar cigar, final byte[] refSeq, final byte[] readSeq, int refIndex, int readIndex) {
    final Cigar result = new Cigar();
    Cigar pending = new Cigar();

    for ( final CigarElement element : cigar.getCigarElements() ) {
        pending.add(element);
        final CigarOperator op = element.getOperator();
        if (op == CigarOperator.D || op == CigarOperator.I) {
            // left-align everything accumulated so far (ending at this indel) as one unit
            final Cigar aligned = AlignmentUtils.leftAlignIndel(pending, refSeq, readSeq, refIndex, readIndex, false);
            for( final CigarElement alignedElement : aligned.getCigarElements() ) {
                result.add(alignedElement);
            }
            // advance the coordinates past the flushed run and start a new one
            refIndex += pending.getReferenceLength();
            readIndex += pending.getReadLength();
            pending = new Cigar();
        }
    }

    // flush any trailing non-indel elements unchanged (no-op when pending is empty)
    for( final CigarElement leftover : pending.getCigarElements() ) {
        result.add(leftover);
    }
    return result;
}
/**
* Take a haplotype which was generated by injecting an allele into a string of bases and run SW against the reference to determine the variants on the haplotype.
* Unfortunately since this haplotype didn't come from the assembly graph you can't straightforwardly use the bubble traversal algorithm to get this information.
* This is a target for future work as we rewrite the HaplotypeCaller to be more bubble-caller based.
* @param haplotype the candidate haplotype
* @param ref the reference bases to align against
* @param haplotypeList the current list of haplotypes
* @param activeRegionStart the start of the active region in the reference byte array
* @param activeRegionStop the stop of the active region in the reference byte array
* @param FORCE_INCLUSION_FOR_GGA_MODE if true will include in the list even if it already exists
* @return true if the candidate haplotype was successfully incorporated into the haplotype list
*/
@Requires({"ref != null", "ref.length >= activeRegionStop - activeRegionStart"})
private boolean addHaplotypeForGGA( final Haplotype haplotype, final byte[] ref, final List<Haplotype> haplotypeList, final int activeRegionStart, final int activeRegionStop, final boolean FORCE_INCLUSION_FOR_GGA_MODE ) {
if( haplotype == null ) { return false; }
final SWPairwiseAlignment swConsensus = new SWPairwiseAlignment( ref, haplotype.getBases(), SWParameterSet.STANDARD_NGS );
haplotype.setAlignmentStartHapwrtRef( swConsensus.getAlignmentStart2wrt1() );
if( swConsensus.getCigar().toString().contains("S") || swConsensus.getCigar().getReferenceLength() < 60 || swConsensus.getAlignmentStart2wrt1() < 0 ) { // protect against unhelpful haplotype alignments
return false;
}
haplotype.setCigar( AlignmentUtils.leftAlignIndel(swConsensus.getCigar(), ref, haplotype.getBases(), swConsensus.getAlignmentStart2wrt1(), 0, true) );
final int hapStart = ReadUtils.getReadCoordinateForReferenceCoordinate(haplotype.getAlignmentStartHapwrtRef(), haplotype.getCigar(), activeRegionStart, ReadUtils.ClippingTail.LEFT_TAIL, true);
int hapStop = ReadUtils.getReadCoordinateForReferenceCoordinate( haplotype.getAlignmentStartHapwrtRef(), haplotype.getCigar(), activeRegionStop, ReadUtils.ClippingTail.RIGHT_TAIL, true );
if( hapStop == ReadUtils.CLIPPING_GOAL_NOT_REACHED && activeRegionStop == haplotype.getAlignmentStartHapwrtRef() + haplotype.getCigar().getReferenceLength() ) {
hapStop = activeRegionStop; // contract for getReadCoordinateForReferenceCoordinate function says that if read ends at boundary then it is outside of the clipping goal
}
byte[] newHaplotypeBases;
// extend partial haplotypes to contain the full active region sequence
if( hapStart == ReadUtils.CLIPPING_GOAL_NOT_REACHED && hapStop == ReadUtils.CLIPPING_GOAL_NOT_REACHED ) {
newHaplotypeBases = ArrayUtils.addAll( ArrayUtils.addAll( ArrayUtils.subarray(ref, activeRegionStart, swConsensus.getAlignmentStart2wrt1()),
haplotype.getBases()),
ArrayUtils.subarray(ref, swConsensus.getAlignmentStart2wrt1() + swConsensus.getCigar().getReferenceLength(), activeRegionStop) );
} else if( hapStart == ReadUtils.CLIPPING_GOAL_NOT_REACHED ) {
newHaplotypeBases = ArrayUtils.addAll( ArrayUtils.subarray(ref, activeRegionStart, swConsensus.getAlignmentStart2wrt1()), ArrayUtils.subarray(haplotype.getBases(), 0, hapStop) );
} else if( hapStop == ReadUtils.CLIPPING_GOAL_NOT_REACHED ) {
newHaplotypeBases = ArrayUtils.addAll( ArrayUtils.subarray(haplotype.getBases(), hapStart, haplotype.getBases().length), ArrayUtils.subarray(ref, swConsensus.getAlignmentStart2wrt1() + swConsensus.getCigar().getReferenceLength(), activeRegionStop) );
} else {
newHaplotypeBases = ArrayUtils.subarray(haplotype.getBases(), hapStart, hapStop);
}
final Haplotype h = new Haplotype( newHaplotypeBases );
final SWPairwiseAlignment swConsensus2 = new SWPairwiseAlignment( ref, h.getBases(), SWParameterSet.STANDARD_NGS );
h.setAlignmentStartHapwrtRef( swConsensus2.getAlignmentStart2wrt1() );
if ( haplotype.isArtificialHaplotype() ) {
h.setArtificialEvent(haplotype.getArtificialEvent());
}
if( swConsensus2.getCigar().toString().contains("S") || swConsensus2.getCigar().getReferenceLength() != activeRegionStop - activeRegionStart || swConsensus2.getAlignmentStart2wrt1() < 0 ) { // protect against unhelpful haplotype alignments
return false;
}
h.setCigar( AlignmentUtils.leftAlignIndel(swConsensus2.getCigar(), ref, h.getBases(), swConsensus2.getAlignmentStart2wrt1(), 0, true) );
if( FORCE_INCLUSION_FOR_GGA_MODE || !haplotypeList.contains(h) ) {
haplotypeList.add(h);
return true;
} else {
return false;
}
/** @return a short description of this assembler, including its minimum kmer size. */
@Override
public String toString() {
    final StringBuilder description = new StringBuilder("DeBruijnAssembler{");
    description.append("minKmer=").append(minKmer);
    description.append('}');
    return description.toString();
}
}

View File

@ -49,6 +49,7 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.annotator.VariantAnnotatorEngine;
import org.broadinstitute.sting.gatk.walkers.genotyper.GenotypeLikelihoodsCalculationModel;
import org.broadinstitute.sting.gatk.walkers.genotyper.UnifiedGenotyperEngine;
@ -71,7 +72,7 @@ public class GenotypingEngine {
private final boolean DEBUG;
private final boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS;
private final static List<Allele> noCall = new ArrayList<Allele>(); // used to noCall all genotypes until the exact model is applied
private final static List<Allele> noCall = new ArrayList<>(); // used to noCall all genotypes until the exact model is applied
private final VariantAnnotatorEngine annotationEngine;
private final MergeVariantsAcrossHaplotypes crossHaplotypeEventMerger;
@ -146,6 +147,7 @@ public class GenotypingEngine {
final GenomeLoc refLoc,
final GenomeLoc activeRegionWindow,
final GenomeLocParser genomeLocParser,
final RefMetaDataTracker tracker,
final List<VariantContext> activeAllelesToGenotype ) {
// sanity check input arguments
if (UG_engine == null) throw new IllegalArgumentException("UG_Engine input can't be null, got "+UG_engine);
@ -162,8 +164,8 @@ public class GenotypingEngine {
final TreeSet<Integer> startPosKeySet = decomposeHaplotypesIntoVariantContexts(haplotypes, haplotypeReadMap, ref, refLoc, activeAllelesToGenotype);
// Walk along each position in the key set and create each event to be outputted
final Set<Haplotype> calledHaplotypes = new HashSet<Haplotype>();
final List<VariantContext> returnCalls = new ArrayList<VariantContext>();
final Set<Haplotype> calledHaplotypes = new HashSet<>();
final List<VariantContext> returnCalls = new ArrayList<>();
for( final int loc : startPosKeySet ) {
if( loc >= activeRegionWindow.getStart() && loc <= activeRegionWindow.getStop() ) { // genotyping an event inside this active region
final List<VariantContext> eventsAtThisLoc = getVCsAtThisLocation(haplotypes, loc, activeAllelesToGenotype);
@ -183,7 +185,7 @@ public class GenotypingEngine {
if( eventsAtThisLoc.size() != mergedVC.getAlternateAlleles().size() ) {
throw new ReviewedStingException("Record size mismatch! Something went wrong in the merging of alleles.");
}
final Map<VariantContext, Allele> mergeMap = new LinkedHashMap<VariantContext, Allele>();
final Map<VariantContext, Allele> mergeMap = new LinkedHashMap<>();
mergeMap.put(null, mergedVC.getReference()); // the reference event (null) --> the reference allele
for(int iii = 0; iii < mergedVC.getAlternateAlleles().size(); iii++) {
mergeMap.put(eventsAtThisLoc.get(iii), mergedVC.getAlternateAllele(iii)); // BUGBUG: This is assuming that the order of alleles is the same as the priority list given to simpleMerge function
@ -204,13 +206,12 @@ public class GenotypingEngine {
convertHaplotypeReadMapToAlleleReadMap( haplotypeReadMap, alleleMapper, 0.0 ) );
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = filterToOnlyOverlappingReads( genomeLocParser, alleleReadMap_annotations, perSampleFilteredReadList, call );
VariantContext annotatedCall = call;
if( annotatedCall.getAlleles().size() != mergedVC.getAlleles().size() ) { // some alleles were removed so reverseTrimming might be necessary!
VariantContext annotatedCall = annotationEngine.annotateContextForActiveRegion(tracker, stratifiedReadMap, call);
if( call.getAlleles().size() != mergedVC.getAlleles().size() ) { // some alleles were removed so reverseTrimming might be necessary!
annotatedCall = GATKVariantContextUtils.reverseTrimAlleles(annotatedCall);
}
annotatedCall = annotationEngine.annotateContext(stratifiedReadMap, annotatedCall);
// maintain the set of all called haplotypes
for ( final Allele calledAllele : call.getAlleles() )
calledHaplotypes.addAll(alleleMapper.get(calledAllele));
@ -244,7 +245,7 @@ public class GenotypingEngine {
if ( in_GGA_mode ) startPosKeySet.clear();
cleanUpSymbolicUnassembledEvents( haplotypes );
//cleanUpSymbolicUnassembledEvents( haplotypes ); // We don't make symbolic alleles so this isn't needed currently
if ( !in_GGA_mode ) {
// run the event merger if we're not in GGA mode
final boolean mergedAnything = crossHaplotypeEventMerger.merge(haplotypes, haplotypeReadMap, startPosKeySet, ref, refLoc);
@ -267,7 +268,7 @@ public class GenotypingEngine {
* @return the list of the sources of vcs in the same order
*/
private List<String> makePriorityList(final List<VariantContext> vcs) {
final List<String> priorityList = new LinkedList<String>();
final List<String> priorityList = new LinkedList<>();
for ( final VariantContext vc : vcs ) priorityList.add(vc.getSource());
return priorityList;
}
@ -276,7 +277,7 @@ public class GenotypingEngine {
final int loc,
final List<VariantContext> activeAllelesToGenotype) {
// the overlapping events to merge into a common reference view
final List<VariantContext> eventsAtThisLoc = new ArrayList<VariantContext>();
final List<VariantContext> eventsAtThisLoc = new ArrayList<>();
if( activeAllelesToGenotype.isEmpty() ) {
for( final Haplotype h : haplotypes ) {
@ -292,7 +293,7 @@ public class GenotypingEngine {
if( compVC.getStart() == loc ) {
int alleleCount = 0;
for( final Allele compAltAllele : compVC.getAlternateAlleles() ) {
List<Allele> alleleSet = new ArrayList<Allele>(2);
List<Allele> alleleSet = new ArrayList<>(2);
alleleSet.add(compVC.getReference());
alleleSet.add(compAltAllele);
final String vcSourceName = "Comp" + compCount + "Allele" + alleleCount;
@ -348,7 +349,7 @@ public class GenotypingEngine {
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
final VariantContext call ) {
final Map<String, PerReadAlleleLikelihoodMap> returnMap = new LinkedHashMap<String, PerReadAlleleLikelihoodMap>();
final Map<String, PerReadAlleleLikelihoodMap> returnMap = new LinkedHashMap<>();
final GenomeLoc callLoc = parser.createGenomeLoc(call);
for( final Map.Entry<String, PerReadAlleleLikelihoodMap> sample : perSampleReadMap.entrySet() ) {
final PerReadAlleleLikelihoodMap likelihoodMap = new PerReadAlleleLikelihoodMap();
@ -384,7 +385,7 @@ public class GenotypingEngine {
// TODO - split into input haplotypes and output haplotypes as not to share I/O arguments
@Requires("haplotypes != null")
protected static void cleanUpSymbolicUnassembledEvents( final List<Haplotype> haplotypes ) {
final List<Haplotype> haplotypesToRemove = new ArrayList<Haplotype>();
final List<Haplotype> haplotypesToRemove = new ArrayList<>();
for( final Haplotype h : haplotypes ) {
for( final VariantContext vc : h.getEventMap().getVariantContexts() ) {
if( vc.isSymbolic() ) {
@ -407,7 +408,7 @@ public class GenotypingEngine {
final Map<Allele, List<Haplotype>> alleleMapper,
final double downsamplingFraction ) {
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap = new LinkedHashMap<String, PerReadAlleleLikelihoodMap>();
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap = new LinkedHashMap<>();
for( final Map.Entry<String, PerReadAlleleLikelihoodMap> haplotypeReadMapEntry : haplotypeReadMap.entrySet() ) { // for each sample
final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = new PerReadAlleleLikelihoodMap();
for( final Map.Entry<Allele, List<Haplotype>> alleleMapperEntry : alleleMapper.entrySet() ) { // for each output allele
@ -430,7 +431,7 @@ public class GenotypingEngine {
}
protected static Map<Allele, List<Haplotype>> createAlleleMapper( final Map<VariantContext, Allele> mergeMap, final Map<Event, List<Haplotype>> eventMap ) {
final Map<Allele, List<Haplotype>> alleleMapper = new LinkedHashMap<Allele, List<Haplotype>>();
final Map<Allele, List<Haplotype>> alleleMapper = new LinkedHashMap<>();
for( final Map.Entry<VariantContext, Allele> entry : mergeMap.entrySet() ) {
alleleMapper.put(entry.getValue(), eventMap.get(new Event(entry.getKey())));
}
@ -441,100 +442,33 @@ public class GenotypingEngine {
@Ensures({"result.size() == eventsAtThisLoc.size() + 1"})
protected static Map<Event, List<Haplotype>> createEventMapper( final int loc, final List<VariantContext> eventsAtThisLoc, final List<Haplotype> haplotypes ) {
final Map<Event, List<Haplotype>> eventMapper = new LinkedHashMap<Event, List<Haplotype>>(eventsAtThisLoc.size()+1);
VariantContext refVC = eventsAtThisLoc.get(0); // the genome loc is the only safe thing to pull out of this VC because ref/alt pairs might change reference basis
eventMapper.put(new Event(null), new ArrayList<Haplotype>());
final Map<Event, List<Haplotype>> eventMapper = new LinkedHashMap<>(eventsAtThisLoc.size()+1);
final Event refEvent = new Event(null);
eventMapper.put(refEvent, new ArrayList<Haplotype>());
for( final VariantContext vc : eventsAtThisLoc ) {
eventMapper.put(new Event(vc), new ArrayList<Haplotype>());
}
final List<Haplotype> undeterminedHaplotypes = new ArrayList<Haplotype>(haplotypes.size());
for( final Haplotype h : haplotypes ) {
if( h.isArtificialHaplotype() && loc == h.getArtificialAllelePosition() ) {
final List<Allele> alleles = new ArrayList<Allele>(2);
alleles.add(h.getArtificialRefAllele());
alleles.add(h.getArtificialAltAllele());
final Event artificialVC = new Event( (new VariantContextBuilder()).source("artificialHaplotype")
.alleles(alleles)
.loc(refVC.getChr(), refVC.getStart(), refVC.getStart() + h.getArtificialRefAllele().length() - 1).make() );
if( eventMapper.containsKey(artificialVC) ) {
eventMapper.get(artificialVC).add(h);
}
} else if( h.getEventMap().get(loc) == null ) { // no event at this location so let's investigate later
undeterminedHaplotypes.add(h);
if( h.getEventMap().get(loc) == null ) {
eventMapper.get(refEvent).add(h);
} else {
boolean haplotypeIsDetermined = false;
for( final VariantContext vcAtThisLoc : eventsAtThisLoc ) {
if( h.getEventMap().get(loc).hasSameAllelesAs(vcAtThisLoc) ) {
eventMapper.get(new Event(vcAtThisLoc)).add(h);
haplotypeIsDetermined = true;
break;
}
}
if( !haplotypeIsDetermined )
undeterminedHaplotypes.add(h);
}
}
for( final Haplotype h : undeterminedHaplotypes ) {
Event matchingEvent = new Event(null);
for( final Map.Entry<Event, List<Haplotype>> eventToTest : eventMapper.entrySet() ) {
// don't test against the reference allele
if( eventToTest.getKey().equals(new Event(null)) )
continue;
// only try to disambiguate for alleles that have had haplotypes previously assigned above
if( eventToTest.getValue().isEmpty() )
continue;
final Haplotype artificialHaplotype = eventToTest.getValue().get(0);
if( isSubSetOf(artificialHaplotype.getEventMap(), h.getEventMap(), true) ) {
matchingEvent = eventToTest.getKey();
break;
}
}
eventMapper.get(matchingEvent).add(h);
}
return eventMapper;
}
/**
 * Tests whether every event in {@code subset} is also represented in {@code superset}.
 *
 * An event matches when the superset has a VariantContext at the same locus (map key)
 * whose alternate alleles contain the subset event's first alternate allele. When
 * {@code resolveSupersetToSubset} is set, the superset's alternate alleles are first
 * re-expressed on the subset's (longer) reference basis via
 * {@link #resolveAlternateAlleles} before the containment check.
 *
 * @param subset                   events keyed by locus that must all be found
 * @param superset                 events keyed by locus to search within
 * @param resolveSupersetToSubset  if true, extend superset alleles to the subset's reference basis
 * @return true iff every subset event has a matching allele in the superset
 */
protected static boolean isSubSetOf(final Map<Integer, VariantContext> subset, final Map<Integer, VariantContext> superset, final boolean resolveSupersetToSubset) {
    for ( final Map.Entry<Integer, VariantContext> subsetEntry : subset.entrySet() ) {
        final VariantContext supersetVC = superset.get(subsetEntry.getKey());
        if ( supersetVC == null ) {
            // no event at this locus in the superset => cannot be a subset
            return false;
        }
        final VariantContext subsetVC = subsetEntry.getValue();
        final List<Allele> supersetAlts = resolveSupersetToSubset
                ? resolveAlternateAlleles(subsetVC.getReference(), supersetVC.getReference(), supersetVC.getAlternateAlleles())
                : supersetVC.getAlternateAlleles();
        if ( !supersetAlts.contains(subsetVC.getAlternateAllele(0)) ) {
            return false;
        }
    }
    return true;
}
/**
 * Re-expresses alleles on a longer reference basis.
 *
 * If {@code targetReference} is longer than {@code actualReference}, each allele in
 * {@code currentAlleles} is extended with the trailing reference bases that
 * {@code targetReference} has beyond {@code actualReference}; otherwise the input
 * list is returned unchanged.
 *
 * @param targetReference the reference allele whose basis we want the alleles expressed on
 * @param actualReference the reference allele the input alleles are currently expressed on
 * @param currentAlleles  the alleles to (possibly) extend
 * @return the input list when no extension is needed, otherwise a new list of extended alleles
 */
private static List<Allele> resolveAlternateAlleles(final Allele targetReference, final Allele actualReference, final List<Allele> currentAlleles) {
    final int extensionLength = targetReference.length() - actualReference.length();
    if ( extensionLength <= 0 ) {
        // already on a basis at least as long as the target; nothing to extend
        return currentAlleles;
    }
    // trailing reference bases that must be appended to every allele
    final byte[] suffixBases = Arrays.copyOfRange(targetReference.getBases(), actualReference.length(), targetReference.length());
    final List<Allele> extendedAlleles = new ArrayList<>(currentAlleles.size());
    for ( final Allele allele : currentAlleles ) {
        extendedAlleles.add(Allele.extend(allele, suffixBases));
    }
    return extendedAlleles;
}
@Ensures({"result.size() == haplotypeAllelesForSample.size()"})
protected static List<Allele> findEventAllelesInSample( final List<Allele> eventAlleles, final List<Allele> haplotypeAlleles, final List<Allele> haplotypeAllelesForSample, final List<List<Haplotype>> alleleMapper, final List<Haplotype> haplotypes ) {
if( haplotypeAllelesForSample.contains(Allele.NO_CALL) ) { return noCall; }
final List<Allele> eventAllelesForSample = new ArrayList<Allele>();
final List<Allele> eventAllelesForSample = new ArrayList<>();
for( final Allele a : haplotypeAllelesForSample ) {
final Haplotype haplotype = haplotypes.get(haplotypeAlleles.indexOf(a));
for( int iii = 0; iii < alleleMapper.size(); iii++ ) {

View File

@ -47,6 +47,10 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.SAMFileWriter;
import org.broadinstitute.sting.commandline.*;
import org.broadinstitute.sting.gatk.CommandLineGATK;
import org.broadinstitute.sting.gatk.arguments.DbsnpArgumentCollection;
@ -68,6 +72,7 @@ import org.broadinstitute.sting.gatk.walkers.genotyper.UnifiedArgumentCollection
import org.broadinstitute.sting.gatk.walkers.genotyper.UnifiedGenotyperEngine;
import org.broadinstitute.sting.gatk.walkers.genotyper.VariantCallContext;
import org.broadinstitute.sting.gatk.walkers.genotyper.afcalc.AFCalcFactory;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading.ReadThreadingAssembler;
import org.broadinstitute.sting.utils.*;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.sting.utils.activeregion.ActiveRegionReadState;
@ -75,8 +80,6 @@ import org.broadinstitute.sting.utils.activeregion.ActivityProfileState;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.fragments.FragmentCollection;
import org.broadinstitute.sting.utils.fragments.FragmentUtils;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.haplotype.*;
import org.broadinstitute.sting.utils.haplotypeBAMWriter.HaplotypeBAMWriter;
@ -135,10 +138,14 @@ import java.util.*;
@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_VARDISC, extraDocs = {CommandLineGATK.class} )
@PartitionBy(PartitionType.LOCUS)
@BAQMode(ApplicationTime = ReadTransformer.ApplicationTime.FORBIDDEN)
@ActiveRegionTraversalParameters(extension=200, maxRegion=300)
@ActiveRegionTraversalParameters(extension=100, maxRegion=300)
@ReadFilters({HCMappingQualityFilter.class})
@Downsample(by= DownsampleType.BY_SAMPLE, toCoverage=250)
public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implements AnnotatorCompatible {
public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, Integer> implements AnnotatorCompatible, NanoSchedulable {
// -----------------------------------------------------------------------------------------------
// general haplotype caller arguments
// -----------------------------------------------------------------------------------------------
/**
* A raw, unfiltered, highly sensitive callset in VCF format.
*/
@ -185,64 +192,6 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Argument(fullName="bamWriterType", shortName="bamWriterType", doc="How should haplotypes be written to the BAM?", required = false)
public HaplotypeBAMWriter.Type bamWriterType = HaplotypeBAMWriter.Type.CALLED_HAPLOTYPES;
/**
* The PairHMM implementation to use for genotype likelihood calculations. The various implementations balance a tradeoff of accuracy and runtime.
*/
@Advanced
@Argument(fullName = "pair_hmm_implementation", shortName = "pairHMM", doc = "The PairHMM implementation to use for genotype likelihood calculations", required = false)
public PairHMM.HMM_IMPLEMENTATION pairHMM = PairHMM.HMM_IMPLEMENTATION.LOGLESS_CACHING;
@Hidden
@Argument(fullName="keepRG", shortName="keepRG", doc="Only use read from this read group when making calls (but use all reads to build the assembly)", required = false)
protected String keepRG = null;
@Advanced
@Argument(fullName="minPruning", shortName="minPruning", doc = "The minimum allowed pruning factor in assembly graph. Paths with <= X supporting kmers are pruned from the graph", required = false)
protected int MIN_PRUNE_FACTOR = 0;
@Advanced
@Argument(fullName="gcpHMM", shortName="gcpHMM", doc="Flat gap continuation penalty for use in the Pair HMM", required = false)
protected int gcpHMM = 10;
@Advanced
@Argument(fullName="maxNumHaplotypesInPopulation", shortName="maxNumHaplotypesInPopulation", doc="Maximum number of haplotypes to consider for your population. This number will probably need to be increased when calling organisms with high heterozygosity.", required = false)
protected int maxNumHaplotypesInPopulation = 25;
@Advanced
@Argument(fullName="minKmer", shortName="minKmer", doc="Minimum kmer length to use in the assembly graph", required = false)
protected int minKmer = 11;
/**
* If this flag is provided, the haplotype caller will include unmapped reads in the assembly and calling
* when these reads occur in the region being analyzed. Typically, for paired end analyses, one pair of the
* read can map, but if its pair is too divergent then it may be unmapped and placed next to its mate, taking
* the mates contig and alignment start. If this flag is provided the haplotype caller will see such reads,
* and may make use of them in assembly and calling, where possible.
*/
@Hidden
@Argument(fullName="includeUmappedReads", shortName="unmapped", doc="If provided, unmapped reads with chromosomal coordinates (i.e., those placed to their maps) will be included in the assembly and calling", required = false)
protected boolean includeUnmappedReads = false;
@Advanced
@Argument(fullName="useAllelesTrigger", shortName="allelesTrigger", doc = "If specified, use additional trigger on variants found in an external alleles file", required=false)
protected boolean USE_ALLELES_TRIGGER = false;
@Advanced
@Argument(fullName="useFilteredReadsForAnnotations", shortName="useFilteredReadsForAnnotations", doc = "If specified, use the contamination-filtered read maps for the purposes of annotating variants", required=false)
protected boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS = false;
@Hidden
@Argument(fullName="justDetermineActiveRegions", shortName="justDetermineActiveRegions", doc = "If specified, the HC won't actually do any assembly or calling, it'll just run the upfront active region determination code. Useful for benchmarking and scalability testing", required=false)
protected boolean justDetermineActiveRegions = false;
@Hidden
@Argument(fullName="dontGenotype", shortName="dontGenotype", doc = "If specified, the HC will do any assembly but won't do calling. Useful for benchmarking and scalability testing", required=false)
protected boolean dontGenotype = false;
@Hidden
@Argument(fullName="errorCorrectKmers", shortName="errorCorrectKmers", doc = "Use an exploratory algorithm to error correct the kmers used during assembly. May cause fundamental problems with the assembly graph itself", required=false)
protected boolean errorCorrectKmers = false;
/**
* rsIDs from this file are used to populate the ID column of the output. Also, the DB INFO flag will be set when appropriate.
* dbSNP is not used in any way for the calculations themselves.
@ -272,7 +221,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
*/
@Advanced
@Argument(fullName="annotation", shortName="A", doc="One or more specific annotations to apply to variant calls", required=false)
protected List<String> annotationsToUse = new ArrayList<String>(Arrays.asList(new String[]{"ClippingRankSumTest"}));
protected List<String> annotationsToUse = new ArrayList<>(Arrays.asList(new String[]{"ClippingRankSumTest", "DepthPerSampleHC"}));
/**
* Which annotations to exclude from output in the VCF file. Note that this argument has higher priority than the -A or -G arguments,
@ -282,10 +231,6 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Argument(fullName="excludeAnnotation", shortName="XA", doc="One or more specific annotations to exclude", required=false)
protected List<String> annotationsToExclude = new ArrayList<String>(Arrays.asList(new String[]{"SpanningDeletions", "TandemRepeatAnnotator"}));
@Advanced
@Argument(fullName="mergeVariantsViaLD", shortName="mergeVariantsViaLD", doc="If specified, we will merge variants together into block substitutions that are in strong local LD", required = false)
protected boolean mergeVariantsViaLD = false;
/**
* Which groups of annotations to add to the output VCF file. See the VariantAnnotator -list argument to view available groups.
*/
@ -295,13 +240,147 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@ArgumentCollection
private StandardCallerArgumentCollection SCAC = new StandardCallerArgumentCollection();
// -----------------------------------------------------------------------------------------------
// arguments to control internal behavior of the debruijn assembler
// -----------------------------------------------------------------------------------------------
@Advanced
@Argument(fullName="useDebruijnAssembler", shortName="useDebruijnAssembler", doc="If specified, we will use the old DeBruijn assembler. Depreciated as of 2.6", required = false)
protected boolean useDebruijnAssembler = false;
@Advanced
@Argument(fullName="minKmerForDebruijnAssembler", shortName="minKmerForDebruijnAssembler", doc="Minimum kmer length to use in the debruijn assembly graph", required = false)
protected int minKmerForDebruijnAssembler = 11;
@Advanced
@Argument(fullName="onlyUseKmerSizeForDebruijnAssembler", shortName="onlyUseKmerSizeForDebruijnAssembler", doc="If specified, we will only build kmer graphs with this kmer size in the debruijn", required = false)
protected int onlyUseKmerSizeForDebruijnAssembler = -1;
// -----------------------------------------------------------------------------------------------
// arguments to control internal behavior of the read threading assembler
// -----------------------------------------------------------------------------------------------
@Advanced
@Argument(fullName="kmerSize", shortName="kmerSize", doc="Kmer size to use in the read threading assembler", required = false)
protected List<Integer> kmerSizes = Arrays.asList(10, 25);
@Advanced
@Argument(fullName="dontIncreaseKmerSizesForCycles", shortName="dontIncreaseKmerSizesForCycles", doc="Should we disable the iterating over kmer sizes when graph cycles are detected?", required = false)
protected boolean dontIncreaseKmerSizesForCycles = false;
@Advanced
@Argument(fullName="numPruningSamples", shortName="numPruningSamples", doc="The number of samples that must pass the minPuning factor in order for the path to be kept", required = false)
protected int numPruningSamples = 1;
/**
* Assembly graph can be quite complex, and could imply a very large number of possible haplotypes. Each haplotype
* considered requires N PairHMM evaluations if there are N reads across all samples. In order to control the
* run of the haplotype caller we only take maxPathsPerSample * nSample paths from the graph, in order of their
* weights, no matter how many paths are possible to generate from the graph. Putting this number too low
* will result in dropping true variation because paths that include the real variant are not even considered.
*/
@Advanced
@Argument(fullName="maxPathsPerSample", shortName="maxPathsPerSample", doc="Max number of paths to consider for the read threading assembler per sample.", required = false)
protected int maxPathsPerSample = 10;
/**
* The minimum number of paths to advance forward for genotyping, regardless of the
* number of samples
*/
private final static int MIN_PATHS_PER_GRAPH = 128;
@Hidden
@Argument(fullName="dontRecoverDanglingTails", shortName="dontRecoverDanglingTails", doc="Should we disable dangling tail recovery in the read threading assembler?", required = false)
protected boolean dontRecoverDanglingTails = false;
// -----------------------------------------------------------------------------------------------
// general advanced arguments to control haplotype caller behavior
// -----------------------------------------------------------------------------------------------
@Advanced
@Argument(fullName="minPruning", shortName="minPruning", doc = "The minimum allowed pruning factor in assembly graph. Paths with <= X supporting kmers are pruned from the graph", required = false)
protected int MIN_PRUNE_FACTOR = 2;
@Advanced
@Argument(fullName="gcpHMM", shortName="gcpHMM", doc="Flat gap continuation penalty for use in the Pair HMM", required = false)
protected int gcpHMM = 10;
/**
* If this flag is provided, the haplotype caller will include unmapped reads in the assembly and calling
* when these reads occur in the region being analyzed. Typically, for paired end analyses, one pair of the
* read can map, but if its pair is too divergent then it may be unmapped and placed next to its mate, taking
* the mates contig and alignment start. If this flag is provided the haplotype caller will see such reads,
* and may make use of them in assembly and calling, where possible.
*/
@Hidden
@Argument(fullName="includeUmappedReads", shortName="unmapped", doc="If provided, unmapped reads with chromosomal coordinates (i.e., those placed to their maps) will be included in the assembly and calling", required = false)
protected boolean includeUnmappedReads = false;
@Advanced
@Argument(fullName="useAllelesTrigger", shortName="allelesTrigger", doc = "If specified, use additional trigger on variants found in an external alleles file", required=false)
protected boolean USE_ALLELES_TRIGGER = false;
@Advanced
@Argument(fullName="useFilteredReadsForAnnotations", shortName="useFilteredReadsForAnnotations", doc = "If specified, use the contamination-filtered read maps for the purposes of annotating variants", required=false)
protected boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS = false;
/**
* The phredScaledGlobalReadMismappingRate reflects the average global mismapping rate of all reads, regardless of their
* mapping quality. This term effects the probability that a read originated from the reference haplotype, regardless of
* its edit distance from the reference, in that the read could have originated from the reference haplotype but
* from another location in the genome. Suppose a read has many mismatches from the reference, say like 5, but
* has a very high mapping quality of 60. Without this parameter, the read would contribute 5 * Q30 evidence
* in favor of its 5 mismatch haplotype compared to reference, potentially enough to make a call off that single
* read for all of these events. With this parameter set to Q30, though, the maximum evidence against the reference
* that this (and any) read could contribute against reference is Q30.
*
* Set this term to any negative number to turn off the global mapping rate
*/
@Advanced
@Argument(fullName="phredScaledGlobalReadMismappingRate", shortName="globalMAPQ", doc="The global assumed mismapping rate for reads", required = false)
protected int phredScaledGlobalReadMismappingRate = 45;
@Advanced
@Argument(fullName="maxNumHaplotypesInPopulation", shortName="maxNumHaplotypesInPopulation", doc="Maximum number of haplotypes to consider for your population. This number will probably need to be increased when calling organisms with high heterozygosity.", required = false)
protected int maxNumHaplotypesInPopulation = 25;
@Advanced
@Argument(fullName="mergeVariantsViaLD", shortName="mergeVariantsViaLD", doc="If specified, we will merge variants together into block substitutions that are in strong local LD", required = false)
protected boolean mergeVariantsViaLD = false;
// -----------------------------------------------------------------------------------------------
// arguments for debugging / developing the haplotype caller
// -----------------------------------------------------------------------------------------------
/**
* The PairHMM implementation to use for genotype likelihood calculations. The various implementations balance a tradeoff of accuracy and runtime.
*/
@Hidden
@Argument(fullName = "pair_hmm_implementation", shortName = "pairHMM", doc = "The PairHMM implementation to use for genotype likelihood calculations", required = false)
public PairHMM.HMM_IMPLEMENTATION pairHMM = PairHMM.HMM_IMPLEMENTATION.LOGLESS_CACHING;
@Hidden
@Argument(fullName="keepRG", shortName="keepRG", doc="Only use read from this read group when making calls (but use all reads to build the assembly)", required = false)
protected String keepRG = null;
@Hidden
@Argument(fullName="justDetermineActiveRegions", shortName="justDetermineActiveRegions", doc = "If specified, the HC won't actually do any assembly or calling, it'll just run the upfront active region determination code. Useful for benchmarking and scalability testing", required=false)
protected boolean justDetermineActiveRegions = false;
@Hidden
@Argument(fullName="dontGenotype", shortName="dontGenotype", doc = "If specified, the HC will do any assembly but won't do calling. Useful for benchmarking and scalability testing", required=false)
protected boolean dontGenotype = false;
@Hidden
@Argument(fullName="errorCorrectKmers", shortName="errorCorrectKmers", doc = "Use an exploratory algorithm to error correct the kmers used during assembly. May cause fundamental problems with the assembly graph itself", required=false)
protected boolean errorCorrectKmers = false;
@Advanced
@Argument(fullName="debug", shortName="debug", doc="If specified, print out very verbose debug information about each triggering active region", required = false)
protected boolean DEBUG;
@Advanced
@Hidden
@Argument(fullName="debugGraphTransformations", shortName="debugGraphTransformations", doc="If specified, we will write DOT formatted graph files out of the assembler for only this graph size", required = false)
protected int debugGraphTransformations = -1;
protected boolean debugGraphTransformations = false;
@Hidden // TODO -- not currently useful
@Argument(fullName="useLowQualityBasesForAssembly", shortName="useLowQualityBasesForAssembly", doc="If specified, we will include low quality bases when doing the assembly", required = false)
@ -311,10 +390,35 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Argument(fullName="dontTrimActiveRegions", shortName="dontTrimActiveRegions", doc="If specified, we will not trim down the active region from the full region (active + extension) to just the active interval for genotyping", required = false)
protected boolean dontTrimActiveRegions = false;
@Hidden
@Argument(fullName="dontUseSoftClippedBases", shortName="dontUseSoftClippedBases", doc="If specified, we will not analyze soft clipped bases in the reads", required = false)
protected boolean dontUseSoftClippedBases = false;
@Hidden
@Argument(fullName="captureAssemblyFailureBAM", shortName="captureAssemblyFailureBAM", doc="If specified, we will write a BAM called assemblyFailure.bam capturing all of the reads that were in the active region when the assembler failed for any reason", required = false)
protected boolean captureAssemblyFailureBAM = false;
@Hidden
@Argument(fullName="allowCyclesInKmerGraphToGeneratePaths", shortName="allowCyclesInKmerGraphToGeneratePaths", doc="If specified, we will allow cycles in the kmer graphs to generate paths with multiple copies of the path sequenece rather than just the shortest paths", required = false)
protected boolean allowCyclesInKmerGraphToGeneratePaths = false;
// Parameters to control read error correction
@Hidden
@Argument(fullName="errorCorrectReads", shortName="errorCorrectReads", doc = "Use an exploratory algorithm to error correct the kmers used during assembly. May cause fundamental problems with the assembly graph itself", required=false)
protected boolean errorCorrectReads = false;
@Hidden
@Argument(fullName="kmerLengthForReadErrorCorrection", shortName="kmerLengthForReadErrorCorrection", doc = "Use an exploratory algorithm to error correct the kmers used during assembly. May cause fundamental problems with the assembly graph itself", required=false)
protected int kmerLengthForReadErrorCorrection = 25;
@Hidden
@Argument(fullName="minObservationsForKmerToBeSolid", shortName="minObservationsForKmerToBeSolid", doc = "A k-mer must be seen at least these times for it considered to be solid", required=false)
protected int minObservationsForKmerToBeSolid = 20;
// -----------------------------------------------------------------------------------------------
// done with Haplotype caller parameters
// -----------------------------------------------------------------------------------------------
// the UG engines
private UnifiedGenotyperEngine UG_engine = null;
@ -342,7 +446,10 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
private final static int PADDING_AROUND_OTHERS_FOR_CALLING = 150;
// the maximum extent into the full active region extension that we're willing to go in genotyping our events
private final static int MAX_GENOTYPING_ACTIVE_REGION_EXTENSION = 25;
private final static int MAX_DISCOVERY_ACTIVE_REGION_EXTENSION = 25;
private final static int MAX_GGA_ACTIVE_REGION_EXTENSION = 100;
private ActiveRegionTrimmer trimmer = null;
private final static int maxReadsInRegionPerSample = 1000; // TODO -- should be an argument
private final static int minReadsPerAlignmentStart = 5; // TODO -- should be an argument
@ -350,10 +457,11 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// bases with quality less than or equal to this value are trimmed off the tails of the reads
private static final byte MIN_TAIL_QUALITY = 20;
private List<String> samplesList = new ArrayList<String>();
private final static double LOG_ONE_HALF = -Math.log10(2.0);
private final static double LOG_ONE_THIRD = -Math.log10(3.0);
private final List<VariantContext> allelesToGenotype = new ArrayList<VariantContext>();
private static final byte MIN_TAIL_QUALITY_WITH_ERROR_CORRECTION = 6;
// the minimum length of a read we'd consider using for genotyping
private final static int MIN_READ_LENGTH = 10;
private List<String> samplesList = new ArrayList<>();
private final static Allele FAKE_REF_ALLELE = Allele.create("N", true); // used in isActive function to call into UG Engine. Should never appear anywhere in a VCF file
private final static Allele FAKE_ALT_ALLELE = Allele.create("<FAKE_ALT>", false); // used in isActive function to call into UG Engine. Should never appear anywhere in a VCF file
@ -373,6 +481,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// get all of the unique sample names
Set<String> samples = SampleUtils.getSAMFileSamples(getToolkit().getSAMFileHeader());
samplesList.addAll( samples );
final int nSamples = samples.size();
// initialize the UnifiedGenotyper Engine which is used to call into the exact model
final UnifiedArgumentCollection UAC = new UnifiedArgumentCollection( SCAC ); // this adapter is used so that the full set of unused UG arguments aren't exposed to the HC user
UG_engine = new UnifiedGenotyperEngine(getToolkit(), UAC, logger, null, null, samples, GATKVariantContextUtils.DEFAULT_PLOIDY);
@ -428,14 +537,36 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
throw new UserException.CouldNotReadInputFile(getToolkit().getArguments().referenceFile, e);
}
// setup the assembler
assemblyEngine = new DeBruijnAssembler(DEBUG, debugGraphTransformations, minKmer, allowCyclesInKmerGraphToGeneratePaths);
// create and setup the assembler
final int maxAllowedPathsForReadThreadingAssembler = Math.max(maxPathsPerSample * nSamples, MIN_PATHS_PER_GRAPH);
assemblyEngine = useDebruijnAssembler
? new DeBruijnAssembler(minKmerForDebruijnAssembler, onlyUseKmerSizeForDebruijnAssembler)
: new ReadThreadingAssembler(maxAllowedPathsForReadThreadingAssembler, kmerSizes, dontIncreaseKmerSizesForCycles, numPruningSamples);
assemblyEngine.setErrorCorrectKmers(errorCorrectKmers);
assemblyEngine.setPruneFactor(MIN_PRUNE_FACTOR);
assemblyEngine.setDebug(DEBUG);
assemblyEngine.setDebugGraphTransformations(debugGraphTransformations);
assemblyEngine.setAllowCyclesInKmerGraphToGeneratePaths(allowCyclesInKmerGraphToGeneratePaths);
assemblyEngine.setRecoverDanglingTails(!dontRecoverDanglingTails);
if ( graphWriter != null ) assemblyEngine.setGraphWriter(graphWriter);
if ( useLowQualityBasesForAssembly ) assemblyEngine.setMinBaseQualityToUseInAssembly((byte)1);
likelihoodCalculationEngine = new LikelihoodCalculationEngine( (byte)gcpHMM, DEBUG, pairHMM );
// setup the likelihood calculation engine
if ( phredScaledGlobalReadMismappingRate < 0 ) phredScaledGlobalReadMismappingRate = -1;
// configure the global mismapping rate
final double log10GlobalReadMismappingRate;
if ( phredScaledGlobalReadMismappingRate < 0 ) {
log10GlobalReadMismappingRate = - Double.MAX_VALUE;
} else {
log10GlobalReadMismappingRate = QualityUtils.qualToErrorProbLog10(phredScaledGlobalReadMismappingRate);
logger.info("Using global mismapping rate of " + phredScaledGlobalReadMismappingRate + " => " + log10GlobalReadMismappingRate + " in log10 likelihood units");
}
// create our likelihood calculation engine
likelihoodCalculationEngine = new LikelihoodCalculationEngine( (byte)gcpHMM, DEBUG, pairHMM, log10GlobalReadMismappingRate );
final MergeVariantsAcrossHaplotypes variantMerger = mergeVariantsViaLD ? new LDMerger(DEBUG, 10, 1) : new MergeVariantsAcrossHaplotypes();
@ -443,6 +574,10 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
if ( bamWriter != null )
haplotypeBAMWriter = HaplotypeBAMWriter.create(bamWriterType, bamWriter, getToolkit().getSAMFileHeader());
trimmer = new ActiveRegionTrimmer(DEBUG, PADDING_AROUND_SNPS_FOR_CALLING, PADDING_AROUND_OTHERS_FOR_CALLING,
UAC.GenotypingMode.equals(GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES) ? MAX_GGA_ACTIVE_REGION_EXTENSION : MAX_DISCOVERY_ACTIVE_REGION_EXTENSION,
getToolkit().getGenomeLocParser());
}
//---------------------------------------------------------------------------------------------------------------
@ -481,7 +616,6 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
if( UG_engine.getUAC().GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) {
final VariantContext vcFromAllelesRod = UnifiedGenotyperEngine.getVCFromAllelesRod(tracker, ref, ref.getLocus(), false, logger, UG_engine.getUAC().alleles);
if( vcFromAllelesRod != null ) {
allelesToGenotype.add(vcFromAllelesRod); // save for later for processing during the ActiveRegion's map call. Should be folded into a RefMetaDataTracker object
return new ActivityProfileState(ref.getLocus(), 1.0);
}
}
@ -494,7 +628,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// if we don't have any data, just abort early
return new ActivityProfileState(ref.getLocus(), 0.0);
final List<Allele> noCall = new ArrayList<Allele>(); // used to noCall all genotypes until the exact model is applied
final List<Allele> noCall = new ArrayList<>(); // used to noCall all genotypes until the exact model is applied
noCall.add(Allele.NO_CALL);
final Map<String, AlignmentContext> splitContexts = AlignmentContextUtils.splitContextBySampleName(context);
@ -516,14 +650,14 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
}
genotypeLikelihoods[AA] += p.getRepresentativeCount() * QualityUtils.qualToProbLog10(qual);
genotypeLikelihoods[AB] += p.getRepresentativeCount() * MathUtils.approximateLog10SumLog10( QualityUtils.qualToProbLog10(qual) + LOG_ONE_HALF, QualityUtils.qualToErrorProbLog10(qual) + LOG_ONE_THIRD + LOG_ONE_HALF );
genotypeLikelihoods[BB] += p.getRepresentativeCount() * QualityUtils.qualToErrorProbLog10(qual) + LOG_ONE_THIRD;
genotypeLikelihoods[AB] += p.getRepresentativeCount() * MathUtils.approximateLog10SumLog10( QualityUtils.qualToProbLog10(qual) + MathUtils.LOG_ONE_HALF, QualityUtils.qualToErrorProbLog10(qual) + MathUtils.LOG_ONE_THIRD + MathUtils.LOG_ONE_HALF );
genotypeLikelihoods[BB] += p.getRepresentativeCount() * QualityUtils.qualToErrorProbLog10(qual) + MathUtils.LOG_ONE_THIRD;
}
}
genotypes.add( new GenotypeBuilder(sample.getKey()).alleles(noCall).PL(genotypeLikelihoods).make() );
}
final List<Allele> alleles = new ArrayList<Allele>();
final List<Allele> alleles = new ArrayList<>();
alleles.add( FAKE_REF_ALLELE );
alleles.add( FAKE_ALT_ALLELE );
final VariantCallContext vcOut = UG_engine_simple_genotyper.calculateGenotypes(new VariantContextBuilder("HCisActive!", context.getContig(), context.getLocation().getStart(), context.getLocation().getStop(), alleles).genotypes(genotypes).make(), GenotypeLikelihoodsCalculationModel.Model.INDEL);
@ -538,74 +672,73 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
//
//---------------------------------------------------------------------------------------------------------------
private final static List<VariantContext> NO_CALLS = Collections.emptyList();
@Override
public Integer map( final ActiveRegion originalActiveRegion, final RefMetaDataTracker metaDataTracker ) {
public List<VariantContext> map( final ActiveRegion originalActiveRegion, final RefMetaDataTracker metaDataTracker ) {
if ( justDetermineActiveRegions )
// we're benchmarking ART and/or the active region determination code in the HC, just leave without doing any work
return 1;
return NO_CALLS;
if( !originalActiveRegion.isActive() ) { return 0; } // Not active so nothing to do!
if( !originalActiveRegion.isActive() ) { return NO_CALLS; } // Not active so nothing to do!
final List<VariantContext> activeAllelesToGenotype = new ArrayList<VariantContext>();
final List<VariantContext> activeAllelesToGenotype = new ArrayList<>();
if( UG_engine.getUAC().GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) {
for( final VariantContext vc : allelesToGenotype ) {
if( originalActiveRegion.getLocation().overlapsP( getToolkit().getGenomeLocParser().createGenomeLoc(vc) ) ) {
for ( final VariantContext vc : metaDataTracker.getValues(UG_engine.getUAC().alleles) ) {
if ( vc.isNotFiltered() ) {
activeAllelesToGenotype.add(vc); // do something with these VCs during GGA mode
}
}
allelesToGenotype.removeAll( activeAllelesToGenotype );
// No alleles found in this region so nothing to do!
if ( activeAllelesToGenotype.isEmpty() ) { return 0; }
if ( activeAllelesToGenotype.isEmpty() ) { return NO_CALLS; }
} else {
if( originalActiveRegion.size() == 0 ) { return 0; } // No reads here so nothing to do!
if( originalActiveRegion.size() == 0 ) { return NO_CALLS; } // No reads here so nothing to do!
}
// run the local assembler, getting back a collection of information on how we should proceed
final AssemblyResult assemblyResult = assembleReads(originalActiveRegion, activeAllelesToGenotype);
// abort early if something is out of the acceptable range
if( assemblyResult.haplotypes.size() == 1 ) { return 1; } // only the reference haplotype remains so nothing else to do!
if (dontGenotype) return 1; // user requested we not proceed
if( ! assemblyResult.isVariationPresent() ) { return NO_CALLS; } // only the reference haplotype remains so nothing else to do!
if (dontGenotype) return NO_CALLS; // user requested we not proceed
// filter out reads from genotyping which fail mapping quality based criteria
final List<GATKSAMRecord> filteredReads = filterNonPassingReads( assemblyResult.regionForGenotyping );
final Collection<GATKSAMRecord> filteredReads = filterNonPassingReads( assemblyResult.regionForGenotyping );
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList = splitReadsBySample( filteredReads );
if( assemblyResult.regionForGenotyping.size() == 0 ) { return 1; } // no reads remain after filtering so nothing else to do!
if( assemblyResult.regionForGenotyping.size() == 0 ) { return NO_CALLS; } // no reads remain after filtering so nothing else to do!
// evaluate each sample's reads against all haplotypes
//logger.info("Computing read likelihoods with " + assemblyResult.regionForGenotyping.size() + " reads");
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = likelihoodCalculationEngine.computeReadLikelihoods( assemblyResult.haplotypes, splitReadsBySample( assemblyResult.regionForGenotyping.getReads() ) );
// subset down to only the best haplotypes to be genotyped in all samples ( in GGA mode use all discovered haplotypes )
final List<Haplotype> bestHaplotypes = selectBestHaplotypesForGenotyping(assemblyResult.haplotypes, stratifiedReadMap);
// Note: we used to subset down at this point to only the "best" haplotypes in all samples for genotyping, but there
// was a bad interaction between that selection and the marginalization that happens over each event when computing
// GLs. In particular, for samples that are heterozygous non-reference (B/C) the marginalization for B treats the
// haplotype containing C as reference (and vice versa). Now this is fine if all possible haplotypes are included
// in the genotyping, but we lose information if we select down to a few haplotypes. [EB]
final GenotypingEngine.CalledHaplotypes calledHaplotypes = genotypingEngine.assignGenotypeLikelihoods( UG_engine,
bestHaplotypes,
assemblyResult.haplotypes,
stratifiedReadMap,
perSampleFilteredReadList,
assemblyResult.fullReferenceWithPadding,
assemblyResult.paddedReferenceLoc,
assemblyResult.regionForGenotyping.getLocation(),
getToolkit().getGenomeLocParser(),
metaDataTracker,
activeAllelesToGenotype );
for( final VariantContext call : calledHaplotypes.getCalls() ) {
// TODO -- uncomment this line once ART-based walkers have a proper RefMetaDataTracker.
// annotationEngine.annotateDBs(metaDataTracker, getToolkit().getGenomeLocParser().createGenomeLoc(call), call);
vcfWriter.add( call );
}
// TODO -- must disable if we are doing NCT, or set the output type of ! presorted
if ( bamWriter != null ) {
haplotypeBAMWriter.writeReadsAlignedToHaplotypes(assemblyResult.haplotypes, assemblyResult.paddedReferenceLoc,
bestHaplotypes,
assemblyResult.haplotypes,
calledHaplotypes.getCalledHaplotypes(),
stratifiedReadMap);
}
if( DEBUG ) { logger.info("----------------------------------------------------------------------------------"); }
return 1; // One active region was processed during this map call
return calledHaplotypes.getCalls();
}
private final static class AssemblyResult {
@ -613,12 +746,18 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
final ActiveRegion regionForGenotyping;
final byte[] fullReferenceWithPadding;
final GenomeLoc paddedReferenceLoc;
final boolean variationPresent;
private AssemblyResult(List<Haplotype> haplotypes, ActiveRegion regionForGenotyping, byte[] fullReferenceWithPadding, GenomeLoc paddedReferenceLoc) {
private AssemblyResult(List<Haplotype> haplotypes, ActiveRegion regionForGenotyping, byte[] fullReferenceWithPadding, GenomeLoc paddedReferenceLoc, boolean variationPresent) {
this.haplotypes = haplotypes;
this.regionForGenotyping = regionForGenotyping;
this.fullReferenceWithPadding = fullReferenceWithPadding;
this.paddedReferenceLoc = paddedReferenceLoc;
this.variationPresent = variationPresent;
}
public boolean isVariationPresent() {
return variationPresent && haplotypes.size() > 1;
}
}
@ -635,70 +774,49 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// Create the reference haplotype which is the bases from the reference that make up the active region
finalizeActiveRegion(activeRegion); // merge overlapping fragments, clip adapter and low qual tails
final Haplotype referenceHaplotype = new Haplotype(activeRegion.getActiveRegionReference(referenceReader), true);
final byte[] fullReferenceWithPadding = activeRegion.getActiveRegionReference(referenceReader, REFERENCE_PADDING);
final GenomeLoc paddedReferenceLoc = getPaddedLoc(activeRegion);
final Haplotype referenceHaplotype = createReferenceHaplotype(activeRegion, paddedReferenceLoc);
final List<Haplotype> haplotypes = assemblyEngine.runLocalAssembly( activeRegion, referenceHaplotype, fullReferenceWithPadding, paddedReferenceLoc, activeAllelesToGenotype );
// Create ReadErrorCorrector object if requested - will be used within assembly engine.
ReadErrorCorrector readErrorCorrector = null;
if (errorCorrectReads)
readErrorCorrector = new ReadErrorCorrector(kmerLengthForReadErrorCorrection, MIN_TAIL_QUALITY_WITH_ERROR_CORRECTION, minObservationsForKmerToBeSolid, DEBUG,fullReferenceWithPadding);
if ( ! dontTrimActiveRegions ) {
return trimActiveRegion(activeRegion, haplotypes, fullReferenceWithPadding, paddedReferenceLoc);
} else {
// we don't want to or cannot create a trimmed active region, so go ahead and use the old one
return new AssemblyResult(haplotypes, activeRegion, fullReferenceWithPadding, paddedReferenceLoc);
try {
final List<Haplotype> haplotypes = assemblyEngine.runLocalAssembly( activeRegion, referenceHaplotype, fullReferenceWithPadding, paddedReferenceLoc, activeAllelesToGenotype,readErrorCorrector );
if ( ! dontTrimActiveRegions ) {
return trimActiveRegion(activeRegion, haplotypes, activeAllelesToGenotype, fullReferenceWithPadding, paddedReferenceLoc);
} else {
// we don't want to trim active regions, so go ahead and use the old one
return new AssemblyResult(haplotypes, activeRegion, fullReferenceWithPadding, paddedReferenceLoc, true);
}
} catch ( Exception e ) {
// Capture any exception that might be thrown, and write out the assembly failure BAM if requested
if ( captureAssemblyFailureBAM ) {
final SAMFileWriter writer = ReadUtils.createSAMFileWriterWithCompression(getToolkit().getSAMFileHeader(), true, "assemblyFailure.bam", 5);
for ( final GATKSAMRecord read : activeRegion.getReads() ) {
writer.addAlignment(read);
}
writer.close();
}
throw e;
}
}
/**
* Trim down the active region to just enough to properly genotype the events among the haplotypes
*
* This function merely creates the region, but it doesn't populate the reads back into the region
*
* @param region our full active region
* @param haplotypes the list of haplotypes we've created from assembly
* @param ref the reference bases over the full padded location
* @param refLoc the span of the reference bases
* @return a new ActiveRegion trimmed down to just what's needed for genotyping, or null if we couldn't do this successfully
* Helper function to create the reference haplotype out of the active region and a padded loc
* @param activeRegion the active region from which to generate the reference haplotype
* @param paddedReferenceLoc the GenomeLoc which includes padding and shows how big the reference haplotype should be
* @return a non-null haplotype
*/
private ActiveRegion createTrimmedRegion(final ActiveRegion region, final List<Haplotype> haplotypes, final byte[] ref, final GenomeLoc refLoc) {
EventMap.buildEventMapsForHaplotypes(haplotypes, ref, refLoc, DEBUG);
final TreeSet<VariantContext> allContexts = EventMap.getAllVariantContexts(haplotypes);
final GenomeLocParser parser = getToolkit().getGenomeLocParser();
if ( allContexts.isEmpty() ) // no variants, so just return the current region
return null;
final List<VariantContext> withinActiveRegion = new LinkedList<VariantContext>();
int pad = PADDING_AROUND_SNPS_FOR_CALLING;
GenomeLoc trimLoc = null;
for ( final VariantContext vc : allContexts ) {
final GenomeLoc vcLoc = parser.createGenomeLoc(vc);
if ( region.getLocation().overlapsP(vcLoc) ) {
if ( ! vc.isSNP() ) // if anything isn't a SNP use the bigger padding
pad = PADDING_AROUND_OTHERS_FOR_CALLING;
trimLoc = trimLoc == null ? vcLoc : trimLoc.endpointSpan(vcLoc);
withinActiveRegion.add(vc);
}
}
// we don't actually have anything in the region after removing variants that don't overlap the region's full location
if ( trimLoc == null ) return null;
final GenomeLoc maxSpan = getToolkit().getGenomeLocParser().createPaddedGenomeLoc(region.getLocation(), MAX_GENOTYPING_ACTIVE_REGION_EXTENSION);
final GenomeLoc idealSpan = getToolkit().getGenomeLocParser().createPaddedGenomeLoc(trimLoc, pad);
final GenomeLoc finalSpan = maxSpan.intersect(idealSpan);
final ActiveRegion trimmedRegion = region.trim(finalSpan);
if ( DEBUG ) {
logger.info("events : " + withinActiveRegion);
logger.info("trimLoc : " + trimLoc);
logger.info("pad : " + pad);
logger.info("idealSpan : " + idealSpan);
logger.info("maxSpan : " + maxSpan);
logger.info("finalSpan : " + finalSpan);
logger.info("regionSpan : " + trimmedRegion.getExtendedLoc() + " size is " + trimmedRegion.getExtendedLoc().size());
}
return trimmedRegion;
private Haplotype createReferenceHaplotype(final ActiveRegion activeRegion, final GenomeLoc paddedReferenceLoc) {
final Haplotype refHaplotype = new Haplotype(activeRegion.getActiveRegionReference(referenceReader), true);
refHaplotype.setAlignmentStartHapwrtRef(activeRegion.getExtendedLoc().getStart() - paddedReferenceLoc.getStart());
final Cigar c = new Cigar();
c.add(new CigarElement(refHaplotype.getBases().length, CigarOperator.M));
refHaplotype.setCigar(c);
return refHaplotype;
}
/**
@ -706,23 +824,33 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
*
* @param originalActiveRegion our full active region
* @param haplotypes the list of haplotypes we've created from assembly
* @param activeAllelesToGenotype additional alleles we might need to genotype (can be empty)
* @param fullReferenceWithPadding the reference bases over the full padded location
* @param paddedReferenceLoc the span of the reference bases
* @return an AssemblyResult containing the trimmed active region with all of the reads we should use
* trimmed down as well, and a revised set of haplotypes. If trimming failed this function
* may choose to use the originalActiveRegion without modification
* trimmed down as well, and a revised set of haplotypes. If trimming down the active region results
* in only the reference haplotype over the non-extended active region, returns null.
*/
private AssemblyResult trimActiveRegion(final ActiveRegion originalActiveRegion,
final List<Haplotype> haplotypes,
final List<VariantContext> activeAllelesToGenotype,
final byte[] fullReferenceWithPadding,
final GenomeLoc paddedReferenceLoc) {
final ActiveRegion trimmedActiveRegion = createTrimmedRegion(originalActiveRegion, haplotypes, fullReferenceWithPadding, paddedReferenceLoc);
if ( DEBUG ) logger.info("Trimming active region " + originalActiveRegion + " with " + haplotypes.size() + " haplotypes");
if ( trimmedActiveRegion == null )
return new AssemblyResult(haplotypes, originalActiveRegion, fullReferenceWithPadding, paddedReferenceLoc);
EventMap.buildEventMapsForHaplotypes(haplotypes, fullReferenceWithPadding, paddedReferenceLoc, DEBUG);
final TreeSet<VariantContext> allVariantsWithinFullActiveRegion = EventMap.getAllVariantContexts(haplotypes);
allVariantsWithinFullActiveRegion.addAll(activeAllelesToGenotype);
final ActiveRegion trimmedActiveRegion = trimmer.trimRegion(originalActiveRegion, allVariantsWithinFullActiveRegion);
if ( trimmedActiveRegion == null ) {
// there were no variants found within the active region itself, so just return null
if ( DEBUG ) logger.info("No variation found within the active region, skipping the region :-)");
return new AssemblyResult(haplotypes, originalActiveRegion, fullReferenceWithPadding, paddedReferenceLoc, false);
}
// trim down the haplotypes
final Set<Haplotype> haplotypeSet = new HashSet<Haplotype>(haplotypes.size());
final Set<Haplotype> haplotypeSet = new HashSet<>(haplotypes.size());
for ( final Haplotype h : haplotypes ) {
final Haplotype trimmed = h.trim(trimmedActiveRegion.getExtendedLoc());
if ( trimmed != null ) {
@ -733,13 +861,13 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
// create the final list of trimmed haplotypes
final List<Haplotype> trimmedHaplotypes = new ArrayList<Haplotype>(haplotypeSet);
final List<Haplotype> trimmedHaplotypes = new ArrayList<>(haplotypeSet);
// sort haplotypes to take full advantage of haplotype start offset optimizations in PairHMM
Collections.sort( trimmedHaplotypes, new HaplotypeBaseComparator() );
if ( DEBUG ) logger.info("Trimmed region to " + trimmedActiveRegion.getLocation() + " size " + trimmedActiveRegion.getLocation().size() + " reduced number of haplotypes from " + haplotypes.size() + " to only " + trimmedHaplotypes.size());
if ( DEBUG ) {
logger.info("Trimming haplotypes reduced number of haplotypes from " + haplotypes.size() + " to only " + trimmedHaplotypes.size());
for ( final Haplotype remaining: trimmedHaplotypes ) {
logger.info(" Remains: " + remaining + " cigar " + remaining.getCigar());
}
@ -747,7 +875,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// trim down the reads and add them to the trimmed active region
final List<GATKSAMRecord> trimmedReads = new ArrayList<GATKSAMRecord>(originalActiveRegion.getReads().size());
final List<GATKSAMRecord> trimmedReads = new ArrayList<>(originalActiveRegion.getReads().size());
for( final GATKSAMRecord read : originalActiveRegion.getReads() ) {
final GATKSAMRecord clippedRead = ReadClipper.hardClipToRegion( read, trimmedActiveRegion.getExtendedLoc().getStart(), trimmedActiveRegion.getExtendedLoc().getStop() );
if( trimmedActiveRegion.readOverlapsRegion(clippedRead) && clippedRead.getReadLength() > 0 ) {
@ -757,22 +885,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
trimmedActiveRegion.clearReads();
trimmedActiveRegion.addAll(ReadUtils.sortReadsByCoordinate(trimmedReads));
return new AssemblyResult(trimmedHaplotypes, trimmedActiveRegion, fullReferenceWithPadding, paddedReferenceLoc);
}
/**
* Select the best N haplotypes according to their likelihoods, if appropriate
*
* @param haplotypes a list of haplotypes to consider
* @param stratifiedReadMap a map from samples -> read likelihoods
* @return the list of haplotypes to genotype
*/
protected List<Haplotype> selectBestHaplotypesForGenotyping(final List<Haplotype> haplotypes, final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
if ( UG_engine.getUAC().GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) {
return haplotypes;
} else {
return likelihoodCalculationEngine.selectBestHaplotypesFromEachSample(haplotypes, stratifiedReadMap, maxNumHaplotypesInPopulation);
}
return new AssemblyResult(trimmedHaplotypes, trimmedActiveRegion, fullReferenceWithPadding, paddedReferenceLoc, true);
}
//---------------------------------------------------------------------------------------------------------------
@ -787,12 +900,16 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
@Override
public Integer reduce(Integer cur, Integer sum) {
return cur + sum;
public Integer reduce(List<VariantContext> callsInRegion, Integer numCalledRegions) {
for( final VariantContext call : callsInRegion ) {
vcfWriter.add( call );
}
return (callsInRegion.isEmpty() ? 0 : 1) + numCalledRegions;
}
@Override
public void onTraversalDone(Integer result) {
likelihoodCalculationEngine.close();
logger.info("Ran local assembly on " + result + " active regions");
}
@ -804,32 +921,31 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
private void finalizeActiveRegion( final ActiveRegion activeRegion ) {
if( DEBUG ) { logger.info("Assembling " + activeRegion.getLocation() + " with " + activeRegion.size() + " reads: (with overlap region = " + activeRegion.getExtendedLoc() + ")"); }
final List<GATKSAMRecord> finalizedReadList = new ArrayList<GATKSAMRecord>();
final FragmentCollection<GATKSAMRecord> fragmentCollection = FragmentUtils.create( activeRegion.getReads() );
activeRegion.clearReads();
// Join overlapping paired reads to create a single longer read
finalizedReadList.addAll( fragmentCollection.getSingletonReads() );
for( final List<GATKSAMRecord> overlappingPair : fragmentCollection.getOverlappingPairs() ) {
finalizedReadList.addAll( FragmentUtils.mergeOverlappingPairedFragments(overlappingPair) );
}
// Loop through the reads hard clipping the adaptor and low quality tails
final List<GATKSAMRecord> readsToUse = new ArrayList<GATKSAMRecord>(finalizedReadList.size());
for( final GATKSAMRecord myRead : finalizedReadList ) {
final List<GATKSAMRecord> readsToUse = new ArrayList<>(activeRegion.getReads().size());
for( final GATKSAMRecord myRead : activeRegion.getReads() ) {
final GATKSAMRecord postAdapterRead = ( myRead.getReadUnmappedFlag() ? myRead : ReadClipper.hardClipAdaptorSequence( myRead ) );
if( postAdapterRead != null && !postAdapterRead.isEmpty() && postAdapterRead.getCigar().getReadLength() > 0 ) {
GATKSAMRecord clippedRead = useLowQualityBasesForAssembly ? postAdapterRead : ReadClipper.hardClipLowQualEnds( postAdapterRead, MIN_TAIL_QUALITY );
GATKSAMRecord clippedRead;
if (errorCorrectReads)
clippedRead = ReadClipper.hardClipLowQualEnds( postAdapterRead, MIN_TAIL_QUALITY_WITH_ERROR_CORRECTION );
else if (useLowQualityBasesForAssembly)
clippedRead = postAdapterRead;
else // default case: clip low qual ends of reads
clippedRead= ReadClipper.hardClipLowQualEnds( postAdapterRead, MIN_TAIL_QUALITY );
// revert soft clips so that we see the alignment start and end assuming the soft clips are all matches
// TODO -- WARNING -- still possibility that unclipping the soft clips will introduce bases that aren't
// TODO -- truly in the extended region, as the unclipped bases might actually include a deletion
// TODO -- w.r.t. the reference. What really needs to happen is that kmers that occur before the
// TODO -- reference haplotype start must be removed
clippedRead = ReadClipper.revertSoftClippedBases(clippedRead);
// uncomment to remove hard clips from consideration at all
//clippedRead = ReadClipper.hardClipSoftClippedBases(clippedRead);
if ( dontUseSoftClippedBases ) {
// uncomment to remove hard clips from consideration at all
clippedRead = ReadClipper.hardClipSoftClippedBases(clippedRead);
} else {
// revert soft clips so that we see the alignment start and end assuming the soft clips are all matches
// TODO -- WARNING -- still possibility that unclipping the soft clips will introduce bases that aren't
// TODO -- truly in the extended region, as the unclipped bases might actually include a deletion
// TODO -- w.r.t. the reference. What really needs to happen is that kmers that occur before the
// TODO -- reference haplotype start must be removed
clippedRead = ReadClipper.revertSoftClippedBases(clippedRead);
}
clippedRead = ReadClipper.hardClipToRegion( clippedRead, activeRegion.getExtendedLoc().getStart(), activeRegion.getExtendedLoc().getStop() );
if( activeRegion.readOverlapsRegion(clippedRead) && clippedRead.getReadLength() > 0 ) {
@ -839,13 +955,14 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
}
activeRegion.clearReads();
activeRegion.addAll(DownsamplingUtils.levelCoverageByPosition(ReadUtils.sortReadsByCoordinate(readsToUse), maxReadsInRegionPerSample, minReadsPerAlignmentStart));
}
private List<GATKSAMRecord> filterNonPassingReads( final org.broadinstitute.sting.utils.activeregion.ActiveRegion activeRegion ) {
final List<GATKSAMRecord> readsToRemove = new ArrayList<GATKSAMRecord>();
private Set<GATKSAMRecord> filterNonPassingReads( final org.broadinstitute.sting.utils.activeregion.ActiveRegion activeRegion ) {
final Set<GATKSAMRecord> readsToRemove = new LinkedHashSet<>();
for( final GATKSAMRecord rec : activeRegion.getReads() ) {
if( rec.getReadLength() < 10 || rec.getMappingQuality() < 20 || BadMateFilter.hasBadMate(rec) || (keepRG != null && !rec.getReadGroup().getId().equals(keepRG)) ) {
if( rec.getReadLength() < MIN_READ_LENGTH || rec.getMappingQuality() < 20 || BadMateFilter.hasBadMate(rec) || (keepRG != null && !rec.getReadGroup().getId().equals(keepRG)) ) {
readsToRemove.add(rec);
}
}
@ -859,12 +976,12 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
return getToolkit().getGenomeLocParser().createGenomeLoc(activeRegion.getExtendedLoc().getContig(), padLeft, padRight);
}
private Map<String, List<GATKSAMRecord>> splitReadsBySample( final List<GATKSAMRecord> reads ) {
final Map<String, List<GATKSAMRecord>> returnMap = new HashMap<String, List<GATKSAMRecord>>();
private Map<String, List<GATKSAMRecord>> splitReadsBySample( final Collection<GATKSAMRecord> reads ) {
final Map<String, List<GATKSAMRecord>> returnMap = new HashMap<>();
for( final String sample : samplesList) {
List<GATKSAMRecord> readList = returnMap.get( sample );
if( readList == null ) {
readList = new ArrayList<GATKSAMRecord>();
readList = new ArrayList<>();
returnMap.put(sample, readList);
}
}

View File

@ -46,9 +46,7 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.*;
/**
* generic utility class that counts kmers
@ -97,6 +95,20 @@ public class KMerCounter {
return countsByKMer.values();
}
/**
* Get kmers that have minCount or greater in this counter
* @param minCount only return kmers with count >= this value
* @return a non-null collection of kmers
*/
public Collection<Kmer> getKmersWithCountsAtLeast(final int minCount) {
final List<Kmer> result = new LinkedList<Kmer>();
for ( final CountedKmer countedKmer : getCountedKmers() ) {
if ( countedKmer.count >= minCount )
result.add(countedKmer.kmer);
}
return result;
}
/**
* Remove all current counts, resetting the counter to an empty state
*/

View File

@ -46,7 +46,11 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Requires;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Fast wrapper for byte[] kmers
@ -149,6 +153,23 @@ public class Kmer {
return bases;
}
/**
* Backdoor method for fast base peeking: avoids copying like bases() and doesn't modify internal state.
* Intended to be used for fast computation of neighboring kmers
* @return Reference to complete bases stores in this kmer
* WARNING: UNSAFE, caller should NEVER modify bases. Speed/safety tradeoff!!
*/
private byte[] unsafePeekAtBases() {
return bases;
}
/**
* Get a string representation of the bases of this kmer
* @return a non-null string
*/
public String baseString() {
return new String(bases());
}
/**
* The length of this kmer
* @return an integer >= 0
@ -157,6 +178,45 @@ public class Kmer {
return length;
}
/**
* Gets a set of differing positions and bases from another k-mer, limiting up to a max distance.
* For example, if this = "ACATT" and other = "ACGGT":
* - if maxDistance < 2 then -1 will be returned, since distance between kmers is 2.
* - If maxDistance >=2, then 2 will be returned, and arrays will be filled as follows:
* differingIndeces = {2,3}
* differingBases = {'G','G'}
* @param other Other k-mer to test
* @param maxDistance Maximum distance to search. If this and other k-mers are beyond this Hamming distance,
* search is aborted and a null is returned
* @param differingIndeces Array with indices of differing bytes in array
* @param differingBases Actual differing bases
* @return Set of mappings of form (int->byte), where each elements represents index
* of k-mer array where bases mismatch, and the byte is the base from other kmer.
* If both k-mers differ by more than maxDistance, returns null
*/
@Requires({"other != null","differingIndeces != null","differingBases != null",
"differingIndeces.size>=maxDistance","differingBases.size>=maxDistance"})
public int getDifferingPositions(final Kmer other,
final int maxDistance,
final int[] differingIndeces,
final byte[] differingBases) {
int dist = 0;
if (length == other.length()) {
final byte[] f2 = other.unsafePeekAtBases();
for (int i=0; i < length; i++)
if(bases[start+i] != f2[i]) {
differingIndeces[dist] = i;
differingBases[dist++] = f2[i];
if (dist > maxDistance)
return -1;
}
}
return dist;
}
@Override
public String toString() {
return "Kmer{" + new String(bases()) + "}";

View File

@ -48,58 +48,104 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.SAMUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.haplotype.HaplotypeScoreComparator;
import org.broadinstitute.sting.utils.pairhmm.*;
import org.broadinstitute.sting.utils.pairhmm.Log10PairHMM;
import org.broadinstitute.sting.utils.pairhmm.LoglessPairHMM;
import org.broadinstitute.sting.utils.pairhmm.PairHMM;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.variantcontext.Allele;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.*;
public class LikelihoodCalculationEngine {
private final static Logger logger = Logger.getLogger(LikelihoodCalculationEngine.class);
private static final double LOG_ONE_HALF = -Math.log10(2.0);
private final byte constantGCP;
private final double log10globalReadMismappingRate;
private final boolean DEBUG;
private final PairHMM pairHMM;
private final int minReadLength = 20;
private final PairHMM.HMM_IMPLEMENTATION hmmType;
private final ThreadLocal<PairHMM> pairHMM = new ThreadLocal<PairHMM>() {
@Override
protected PairHMM initialValue() {
switch (hmmType) {
case EXACT: return new Log10PairHMM(true);
case ORIGINAL: return new Log10PairHMM(false);
case LOGLESS_CACHING: return new LoglessPairHMM();
default:
throw new UserException.BadArgumentValue("pairHMM", "Specified pairHMM implementation is unrecognized or incompatible with the HaplotypeCaller. Acceptable options are ORIGINAL, EXACT, CACHING, and LOGLESS_CACHING.");
}
}
};
private final static boolean WRITE_LIKELIHOODS_TO_FILE = false;
private final static String LIKELIHOODS_FILENAME = "likelihoods.txt";
private final PrintStream likelihoodsStream;
/**
* The expected rate of random sequencing errors for a read originating from its true haplotype.
*
* For example, if this is 0.01, then we'd expect 1 error per 100 bp.
*/
private final double EXPECTED_ERROR_RATE_PER_BASE = 0.02;
public LikelihoodCalculationEngine( final byte constantGCP, final boolean debug, final PairHMM.HMM_IMPLEMENTATION hmmType ) {
switch (hmmType) {
case EXACT:
pairHMM = new Log10PairHMM(true);
break;
case ORIGINAL:
pairHMM = new Log10PairHMM(false);
break;
case LOGLESS_CACHING:
pairHMM = new LoglessPairHMM();
break;
default:
throw new UserException.BadArgumentValue("pairHMM", "Specified pairHMM implementation is unrecognized or incompatible with the HaplotypeCaller. Acceptable options are ORIGINAL, EXACT, CACHING, and LOGLESS_CACHING.");
}
private final static double EXPECTED_ERROR_RATE_PER_BASE = 0.02;
/**
 * Create a new LikelihoodCalculationEngine using the provided parameters and HMM implementation.
 *
 * @param constantGCP the gap continuation penalty to use with the PairHMM
 * @param debug should we emit debugging information during the calculation?
 * @param hmmType the type of the HMM to use
 * @param log10globalReadMismappingRate the global mismapping probability, in log10(prob) units. A value of
 *                                      -3 means that the chance that a read doesn't actually belong at this
 *                                      location in the genome is 1 in 1000.  The effect of this parameter is
 *                                      to cap the maximum likelihood difference between the reference haplotype
 *                                      and the best alternative haplotype by -3 log units.  So if the best
 *                                      haplotype is at -10 and this parameter has a value of -3 then even if the
 *                                      reference haplotype gets a score of -100 from the pairhmm it will be
 *                                      assigned a likelihood of -13.
 */
public LikelihoodCalculationEngine( final byte constantGCP, final boolean debug, final PairHMM.HMM_IMPLEMENTATION hmmType, final double log10globalReadMismappingRate ) {
    this.hmmType = hmmType;
    this.constantGCP = constantGCP;
    // fixed: DEBUG was assigned twice (once as `DEBUG = debug;`, once as `this.DEBUG = debug;`),
    // which is illegal for a final field -- a single qualified assignment remains
    this.DEBUG = debug;
    this.log10globalReadMismappingRate = log10globalReadMismappingRate;

    // open the likelihoods dump stream only when the compile-time debugging flag is enabled
    if ( WRITE_LIKELIHOODS_TO_FILE ) {
        try {
            likelihoodsStream = new PrintStream(new FileOutputStream(new File(LIKELIHOODS_FILENAME)));
        } catch ( FileNotFoundException e ) {
            throw new RuntimeException(e);
        }
    } else {
        likelihoodsStream = null;
    }
}
/**
 * Create a LikelihoodCalculationEngine with default parameters: gap continuation penalty of Q10,
 * debugging disabled, the logless-caching PairHMM implementation, and a global read mismapping
 * rate cap of -3 log10 units (i.e. 1 in 1000).
 */
public LikelihoodCalculationEngine() {
    this((byte)10, false, PairHMM.HMM_IMPLEMENTATION.LOGLESS_CACHING, -3);
}
/**
 * Release resources held by this engine: closes the likelihoods dump stream if one was opened
 * (it is null unless WRITE_LIKELIHOODS_TO_FILE is enabled).
 */
public void close() {
    if ( likelihoodsStream != null ) likelihoodsStream.close();
}
/**
* Initialize our pairHMM with parameters appropriate to the haplotypes and reads we're going to evaluate
*
@ -124,7 +170,7 @@ public class LikelihoodCalculationEngine {
}
// initialize arrays to hold the probabilities of being in the match, insertion and deletion cases
pairHMM.initialize(X_METRIC_LENGTH, Y_METRIC_LENGTH);
pairHMM.get().initialize(X_METRIC_LENGTH, Y_METRIC_LENGTH);
}
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods( final List<Haplotype> haplotypes, final Map<String, List<GATKSAMRecord>> perSampleReadList ) {
@ -132,9 +178,8 @@ public class LikelihoodCalculationEngine {
initializePairHMM(haplotypes, perSampleReadList);
// Add likelihoods for each sample's reads to our stratifiedReadMap
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = new HashMap<String, PerReadAlleleLikelihoodMap>();
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = new LinkedHashMap<>();
for( final Map.Entry<String, List<GATKSAMRecord>> sampleEntry : perSampleReadList.entrySet() ) {
//if( DEBUG ) { System.out.println("Evaluating sample " + sample + " with " + perSampleReadList.get( sample ).size() + " passing reads"); }
// evaluate the likelihood of the reads given those haplotypes
final PerReadAlleleLikelihoodMap map = computeReadLikelihoods(haplotypes, sampleEntry.getValue());
@ -152,17 +197,16 @@ public class LikelihoodCalculationEngine {
private PerReadAlleleLikelihoodMap computeReadLikelihoods( final List<Haplotype> haplotypes, final List<GATKSAMRecord> reads) {
// first, a little set up to get copies of the Haplotypes that are Alleles (more efficient than creating them each time)
final int numHaplotypes = haplotypes.size();
final Map<Haplotype, Allele> alleleVersions = new HashMap<Haplotype, Allele>(numHaplotypes);
final Map<Haplotype, Allele> alleleVersions = new LinkedHashMap<>(numHaplotypes);
Allele refAllele = null;
for ( final Haplotype haplotype : haplotypes ) {
alleleVersions.put(haplotype, Allele.create(haplotype, true));
final Allele allele = Allele.create(haplotype, true);
alleleVersions.put(haplotype, allele);
if ( haplotype.isReference() ) refAllele = allele;
}
final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = new PerReadAlleleLikelihoodMap();
for( final GATKSAMRecord read : reads ) {
if ( read.getReadLength() < minReadLength )
// don't consider any reads that have a read length < the minimum
continue;
final byte[] overallGCP = new byte[read.getReadLength()];
Arrays.fill( overallGCP, constantGCP ); // Is there a way to derive empirical estimates for this from the data?
// NOTE -- must clone anything that gets modified here so we don't screw up future uses of the read
@ -177,14 +221,45 @@ public class LikelihoodCalculationEngine {
readQuals[kkk] = ( readQuals[kkk] < (byte) 18 ? QualityUtils.MIN_USABLE_Q_SCORE : readQuals[kkk] );
}
// keep track of the reference likelihood and the best non-ref likelihood
double refLog10l = Double.NEGATIVE_INFINITY;
double bestNonReflog10L = Double.NEGATIVE_INFINITY;
// iterate over all haplotypes, calculating the likelihood of the read for each haplotype
for( int jjj = 0; jjj < numHaplotypes; jjj++ ) {
final Haplotype haplotype = haplotypes.get(jjj);
final boolean isFirstHaplotype = jjj == 0;
final double log10l = pairHMM.computeReadLikelihoodGivenHaplotypeLog10(haplotype.getBases(),
final double log10l = pairHMM.get().computeReadLikelihoodGivenHaplotypeLog10(haplotype.getBases(),
read.getReadBases(), readQuals, readInsQuals, readDelQuals, overallGCP, isFirstHaplotype);
if ( WRITE_LIKELIHOODS_TO_FILE ) {
likelihoodsStream.printf("%s %s %s %s %s %s %f%n",
haplotype.getBaseString(),
new String(read.getReadBases()),
SAMUtils.phredToFastq(readQuals),
SAMUtils.phredToFastq(readInsQuals),
SAMUtils.phredToFastq(readDelQuals),
SAMUtils.phredToFastq(overallGCP),
log10l);
}
if ( haplotype.isNonReference() )
bestNonReflog10L = Math.max(bestNonReflog10L, log10l);
else
refLog10l = log10l;
perReadAlleleLikelihoodMap.add(read, alleleVersions.get(haplotype), log10l);
}
// ensure that the reference haplotype is no worse than the best non-ref haplotype minus the global
// mismapping rate. This protects us from the case where the assembly has produced haplotypes
// that are very divergent from reference, but are supported by only one read. In effect
// we capping how badly scoring the reference can be for any read by the chance that the read
// itself just doesn't belong here
final double worstRefLog10Allowed = bestNonReflog10L + log10globalReadMismappingRate;
if ( refLog10l < (worstRefLog10Allowed) ) {
perReadAlleleLikelihoodMap.add(read, refAllele, worstRefLog10Allowed);
}
}
return perReadAlleleLikelihoodMap;
@ -223,7 +298,7 @@ public class LikelihoodCalculationEngine {
// Compute log10(10^x1/2 + 10^x2/2) = log10(10^x1+10^x2)-log10(2)
// First term is approximated by Jacobian log with table lookup.
haplotypeLikelihood += ReadUtils.getMeanRepresentativeReadCount( entry.getKey() ) *
( MathUtils.approximateLog10SumLog10(entry.getValue().get(iii_allele), entry.getValue().get(jjj_allele)) + LOG_ONE_HALF );
( MathUtils.approximateLog10SumLog10(entry.getValue().get(iii_allele), entry.getValue().get(jjj_allele)) + MathUtils.LOG_ONE_HALF );
}
}
haplotypeLikelihoodMatrix[iii][jjj] = haplotypeLikelihood;
@ -321,11 +396,11 @@ public class LikelihoodCalculationEngine {
if ( haplotypes.size() == 2 ) return haplotypes; // fast path -- we'll always want to use 2 haplotypes
// all of the haplotypes that at least one sample called as one of the most likely
final Set<Haplotype> selectedHaplotypes = new HashSet<Haplotype>();
final Set<Haplotype> selectedHaplotypes = new HashSet<>();
selectedHaplotypes.add(findReferenceHaplotype(haplotypes)); // ref is always one of the selected
// our annoying map from allele -> haplotype
final Map<Allele, Haplotype> allele2Haplotype = new HashMap<Allele, Haplotype>();
final Map<Allele, Haplotype> allele2Haplotype = new HashMap<>();
for ( final Haplotype h : haplotypes ) {
h.setScore(h.isReference() ? Double.MAX_VALUE : 0.0); // set all of the scores to 0 (lowest value) for all non-ref haplotypes
allele2Haplotype.put(Allele.create(h, h.isReference()), h);

View File

@ -46,28 +46,345 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.*;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.sting.utils.smithwaterman.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.smithwaterman.SWParameterSet;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.io.File;
import java.io.PrintStream;
import java.util.List;
import java.util.*;
/**
* Created by IntelliJ IDEA.
* Abstract base class for all HaplotypeCaller assemblers
*
* User: ebanks
* Date: Mar 14, 2011
*/
public abstract class LocalAssemblyEngine {
public static final byte DEFAULT_MIN_BASE_QUALITY_TO_USE = (byte) 8;
private final static Logger logger = Logger.getLogger(LocalAssemblyEngine.class);
/**
* If false, we will only write out a region around the reference source
*/
private final static boolean PRINT_FULL_GRAPH_FOR_DEBUGGING = true;
public static final byte DEFAULT_MIN_BASE_QUALITY_TO_USE = (byte) 8;
private static final int MIN_HAPLOTYPE_REFERENCE_LENGTH = 30;
protected final int numBestHaplotypesPerGraph;
protected boolean debug = false;
protected boolean allowCyclesInKmerGraphToGeneratePaths = false;
protected boolean debugGraphTransformations = false;
protected boolean recoverDanglingTails = true;
protected PrintStream graphWriter = null;
protected byte minBaseQualityToUseInAssembly = DEFAULT_MIN_BASE_QUALITY_TO_USE;
protected int pruneFactor = 2;
protected boolean errorCorrectKmers = false;
protected LocalAssemblyEngine() { }
private PrintStream graphWriter = null;
/**
 * Construct an assembly engine with default settings, ready for use.
 *
 * @param numBestHaplotypesPerGraph how many haplotypes to extract from each assembled graph; must be at least 1
 * @throws IllegalArgumentException if numBestHaplotypesPerGraph is not positive
 */
protected LocalAssemblyEngine(final int numBestHaplotypesPerGraph) {
    if ( numBestHaplotypesPerGraph <= 0 ) {
        throw new IllegalArgumentException("numBestHaplotypesPerGraph should be >= 1 but got " + numBestHaplotypesPerGraph);
    }
    this.numBestHaplotypesPerGraph = numBestHaplotypesPerGraph;
}
/**
 * Main subclass function: given reads and a reference haplotype give us graphs to use for constructing
 * non-reference haplotypes.
 *
 * @param reads the reads we're going to assemble
 * @param refHaplotype the reference haplotype
 * @param activeAlleleHaplotypes haplotypes carrying the alleles to force into the graphs (GGA mode); may be empty
 * @return a non-null list of assembled sequence graphs
 */
protected abstract List<SeqGraph> assemble(List<GATKSAMRecord> reads, Haplotype refHaplotype, List<Haplotype> activeAlleleHaplotypes);
/**
 * Convenience overload of the three-argument assemble for callers with no active allele
 * haplotypes to inject (i.e. not running in GGA mode).
 */
protected List<SeqGraph> assemble(List<GATKSAMRecord> reads, Haplotype refHaplotype) {
    return assemble(reads, refHaplotype, Collections.<Haplotype>emptyList());
}
/**
 * Main entry point into the assembly engine.  Build a set of deBruijn graphs out of the provided reference sequence and list of reads
 *
 * @param activeRegion              ActiveRegion object holding the reads which are to be used during assembly
 * @param refHaplotype              reference haplotype object
 * @param fullReferenceWithPadding  byte array holding the reference sequence with padding
 * @param refLoc                    GenomeLoc object corresponding to the reference sequence with padding
 * @param activeAllelesToGenotype   the alleles to inject into the haplotypes during GGA mode
 * @param readErrorCorrector        a ReadErrorCorrector object, if reads are to be corrected before assembly. Can be null if no error corrector is to be used.
 * @return a non-empty list of all the haplotypes that are produced during assembly
 * @throws IllegalArgumentException if any argument is null/inconsistent or pruneFactor is negative
 */
public List<Haplotype> runLocalAssembly(final ActiveRegion activeRegion,
                                        final Haplotype refHaplotype,
                                        final byte[] fullReferenceWithPadding,
                                        final GenomeLoc refLoc,
                                        final List<VariantContext> activeAllelesToGenotype,
                                        final ReadErrorCorrector readErrorCorrector) {
    // sanity-check the inputs before doing any work
    if( activeRegion == null ) { throw new IllegalArgumentException("Assembly engine cannot be used with a null ActiveRegion."); }
    if( refHaplotype == null ) { throw new IllegalArgumentException("Reference haplotype cannot be null."); }
    if( fullReferenceWithPadding.length != refLoc.size() ) { throw new IllegalArgumentException("Reference bases and reference loc must be the same size."); }
    if( pruneFactor < 0 ) { throw new IllegalArgumentException("Pruning factor cannot be negative"); }

    // create the list of artificial haplotypes that should be added to the graph for GGA mode
    final List<Haplotype> activeAlleleHaplotypes = createActiveAlleleHaplotypes(refHaplotype, activeAllelesToGenotype, activeRegion.getExtendedLoc());

    // error-correct reads before clipping low-quality tails: some low quality bases might be good and we want to recover them
    final List<GATKSAMRecord> correctedReads;
    if (readErrorCorrector != null) {
        // now correct all reads in active region after filtering/downsampling
        // Note that original reads in active region are NOT modified by default, since they will be used later for GL computation,
        // and we only want the read-error corrected reads for graph building.
        readErrorCorrector.addReadsToKmers(activeRegion.getReads());
        correctedReads = new ArrayList<>(readErrorCorrector.correctReads(activeRegion.getReads()));
    }
    else correctedReads = activeRegion.getReads();

    // create the graphs by calling our subclass assemble method
    final List<SeqGraph> graphs = assemble(correctedReads, refHaplotype, activeAlleleHaplotypes);

    // do some QC on the graphs (throws IllegalStateException if the reference path is broken)
    for ( final SeqGraph graph : graphs ) { sanityCheckGraph(graph, refHaplotype); }

    // print the graphs if the appropriate debug option has been turned on
    if ( graphWriter != null ) { printGraphs(graphs); }

    // find the best paths in the graphs and return them as haplotypes
    return findBestPaths( graphs, refHaplotype, refLoc, activeRegion.getExtendedLoc() );
}
/**
 * Build the artificial GGA-mode haplotypes by splicing each provided alternate allele into the
 * reference haplotype.
 *
 * @param refHaplotype the reference haplotype
 * @param activeAllelesToGenotype the alternate alleles to inject, as VariantContexts
 * @param activeRegionWindow the window containing the reference haplotype
 * @return a non-null (possibly empty) list of unique haplotypes, one per successfully inserted allele
 */
private List<Haplotype> createActiveAlleleHaplotypes(final Haplotype refHaplotype, final List<VariantContext> activeAllelesToGenotype, final GenomeLoc activeRegionWindow) {
    // LinkedHashSet both de-duplicates identical haplotypes and keeps a stable, insertion-ordered result
    final Set<Haplotype> uniqueHaplotypes = new LinkedHashSet<>();
    final int refStart = refHaplotype.getAlignmentStartHapwrtRef();

    for ( final VariantContext vc : activeAllelesToGenotype ) {
        for ( final Allele altAllele : vc.getAlternateAlleles() ) {
            // translate the variant's genomic start into an offset within the reference haplotype
            final int insertLocation = refStart + vc.getStart() - activeRegionWindow.getStart();
            final Haplotype withAllele = refHaplotype.insertAllele(vc.getReference(), altAllele, insertLocation, vc.getStart());
            if ( withAllele == null ) {
                continue; // the requested allele could not be inserted into the haplotype
            }
            uniqueHaplotypes.add(withAllele);
        }
    }

    return new ArrayList<>(uniqueHaplotypes);
}
/**
 * Extract the numBestHaplotypesPerGraph best paths from each graph and convert them into haplotypes.
 *
 * The reference haplotype is always included in the result.  Candidate paths are dropped quietly
 * when no meaningful alignment to the reference can be produced (null cigar), when the path is
 * too divergent from the reference (an N cigar element), or when the aligned reference span is
 * shorter than MIN_HAPLOTYPE_REFERENCE_LENGTH.
 *
 * @param graphs             the assembled graphs to extract paths from
 * @param refHaplotype       the reference haplotype
 * @param refLoc             location of the (padded) reference sequence
 * @param activeRegionWindow the active region window; attached to every returned haplotype
 * @return a non-null list of haplotypes containing at least the reference haplotype
 * @throws IllegalArgumentException if a graph lacks a reference source or sink vertex
 * @throws IllegalStateException on an inconsistent Smith-Waterman alignment result
 */
@Ensures({"result.contains(refHaplotype)"})
protected List<Haplotype> findBestPaths(final List<SeqGraph> graphs, final Haplotype refHaplotype, final GenomeLoc refLoc, final GenomeLoc activeRegionWindow) {
    // add the reference haplotype separately from all the others to ensure that it is present in the list of haplotypes
    final Set<Haplotype> returnHaplotypes = new LinkedHashSet<>();
    returnHaplotypes.add( refHaplotype );

    final int activeRegionStart = refHaplotype.getAlignmentStartHapwrtRef();

    for( final SeqGraph graph : graphs ) {
        final SeqVertex source = graph.getReferenceSourceVertex();
        final SeqVertex sink = graph.getReferenceSinkVertex();
        if ( source == null || sink == null ) throw new IllegalArgumentException("Both source and sink cannot be null but got " + source + " and sink " + sink + " for graph "+ graph);

        final KBestPaths<SeqVertex,BaseEdge> pathFinder = new KBestPaths<>(allowCyclesInKmerGraphToGeneratePaths);
        for ( final Path<SeqVertex,BaseEdge> path : pathFinder.getKBestPaths(graph, numBestHaplotypesPerGraph, source, sink) ) {
            Haplotype h = new Haplotype( path.getBases() );
            if( !returnHaplotypes.contains(h) ) {
                final Cigar cigar = path.calculateCigar(refHaplotype.getBases());

                if ( cigar == null ) {
                    // couldn't produce a meaningful alignment of haplotype to reference, fail quietly
                    continue;
                } else if( cigar.isEmpty() ) {
                    throw new IllegalStateException("Smith-Waterman alignment failure. Cigar = " + cigar + " with reference length " + cigar.getReferenceLength() +
                            " but expecting reference length of " + refHaplotype.getCigar().getReferenceLength());
                } else if ( pathIsTooDivergentFromReference(cigar) || cigar.getReferenceLength() < MIN_HAPLOTYPE_REFERENCE_LENGTH ) {
                    // N cigar elements means that a bubble was too divergent from the reference so skip over this path
                    continue;
                } else if( cigar.getReferenceLength() != refHaplotype.getCigar().getReferenceLength() ) { // SW failure
                    throw new IllegalStateException("Smith-Waterman alignment failure. Cigar = " + cigar + " with reference length "
                            + cigar.getReferenceLength() + " but expecting reference length of " + refHaplotype.getCigar().getReferenceLength()
                            + " ref = " + refHaplotype + " path " + new String(path.getBases()));
                }

                h.setCigar(cigar);
                h.setAlignmentStartHapwrtRef(activeRegionStart);
                h.setScore(path.getScore());
                returnHaplotypes.add(h);
                if ( debug )
                    logger.info("Adding haplotype " + h.getCigar() + " from graph with kmer " + graph.getKmerSize());
            }
        }
    }

    // add genome locs to the haplotypes
    for ( final Haplotype h : returnHaplotypes ) h.setGenomeLocation(activeRegionWindow);

    // fixed: removed a dead branch that compared returnHaplotypes.size() to itself
    // (`if ( returnHaplotypes.size() < returnHaplotypes.size() )`) -- the condition could
    // never be true, so the log statement it guarded was unreachable.
    // NOTE(review): the debug message below likewise reports returnHaplotypes.size() twice
    // ("X candidate haplotypes of X possible combinations"); the second operand was presumably
    // meant to be the total number of candidate paths examined -- confirm the intent before
    // changing the message text.

    if( debug ) {
        if( returnHaplotypes.size() > 1 ) {
            logger.info("Found " + returnHaplotypes.size() + " candidate haplotypes of " + returnHaplotypes.size() + " possible combinations to evaluate every read against.");
        } else {
            logger.info("Found only the reference haplotype in the assembly graph.");
        }
        for( final Haplotype h : returnHaplotypes ) {
            logger.info( h.toString() );
            logger.info( "> Cigar = " + h.getCigar() + " : " + h.getCigar().getReferenceLength() + " score " + h.getScore() + " ref " + h.isReference());
        }
    }

    return new ArrayList<>(returnHaplotypes);
}
/**
 * Decide whether a haplotype path must be skipped because its bubble was incomplete or too
 * divergent from the reference.  Bubble traversal signals that condition by emitting a
 * CigarOperator.N element, so any N operator in the cigar means "skip this path".
 *
 * @param c the cigar to test
 * @return true if the path carrying this cigar should be skipped
 */
@Requires("c != null")
private boolean pathIsTooDivergentFromReference( final Cigar c ) {
    for ( final CigarElement element : c.getCigarElements() ) {
        // CigarOperator is an enum, so identity comparison is equivalent to equals()
        if ( element.getOperator() == CigarOperator.N )
            return true;
    }
    return false;
}
/**
 * Write a graph to disk for debugging, but only when debugGraphTransformations is enabled.
 *
 * @param graph the graph to print
 * @param file  the destination file
 */
protected void printDebugGraphTransform(final BaseGraph graph, final File file) {
    if ( !debugGraphTransformations )
        return;

    // dump the complete graph by default; otherwise restrict output to the region around the reference source
    final BaseGraph graphToPrint = PRINT_FULL_GRAPH_FOR_DEBUGGING ? graph : graph.subsetToRefSource();
    graphToPrint.printGraph(file, pruneFactor);
}
/**
 * Apply the standard chain of cleanup transformations to a freshly-built sequence graph:
 * zip linear chains, prune, remove disconnected vertices, simplify, and drop paths not
 * connected to the reference.  The order of these operations matters (see inline comments).
 *
 * After each step the graph is dumped to a numbered dot file when debugGraphTransformations
 * is enabled.
 *
 * @param seqGraph the graph to clean up; modified in place
 * @return the cleaned graph, or null if the graph degenerated such that the reference
 *         source and/or sink can no longer be identified
 */
protected SeqGraph cleanupSeqGraph(final SeqGraph seqGraph) {
    printDebugGraphTransform(seqGraph, new File("sequenceGraph.1.dot"));

    // the very first thing we need to do is zip up the graph, or pruneGraph will be too aggressive
    seqGraph.zipLinearChains();
    printDebugGraphTransform(seqGraph, new File("sequenceGraph.2.zipped.dot"));

    // now go through and prune the graph, removing vertices no longer connected to the reference chain
    // IMPORTANT: pruning must occur before we call simplifyGraph, as simplifyGraph adds 0 weight
    // edges to maintain graph connectivity.
    seqGraph.pruneGraph(pruneFactor);
    seqGraph.removeVerticesNotConnectedToRefRegardlessOfEdgeDirection();

    printDebugGraphTransform(seqGraph, new File("sequenceGraph.3.pruned.dot"));
    seqGraph.simplifyGraph();
    printDebugGraphTransform(seqGraph, new File("sequenceGraph.4.merged.dot"));

    // The graph has degenerated in some way, so the reference source and/or sink cannot be id'd.  Can
    // happen in cases where for example the reference somehow manages to acquire a cycle, or
    // where the entire assembly collapses back into the reference sequence.
    if ( seqGraph.getReferenceSourceVertex() == null || seqGraph.getReferenceSinkVertex() == null )
        return null;

    seqGraph.removePathsNotConnectedToRef();
    seqGraph.simplifyGraph();
    if ( seqGraph.vertexSet().size() == 1 ) {
        // we've perfectly assembled into a single reference haplotype, add a empty seq vertex to stop
        // the code from blowing up.
        // TODO -- ref properties should really be on the vertices, not the graph itself
        final SeqVertex complete = seqGraph.vertexSet().iterator().next();
        final SeqVertex dummy = new SeqVertex("");
        seqGraph.addVertex(dummy);
        seqGraph.addEdge(complete, dummy, new BaseEdge(true, 0));
    }
    printDebugGraphTransform(seqGraph, new File("sequenceGraph.5.final.dot"));
    return seqGraph;
}
/**
 * Perform general QC on the graph to make sure something hasn't gone wrong during assembly.
 * Currently delegates entirely to the reference-path check in sanityCheckReferenceGraph.
 *
 * @param graph the graph to check
 * @param refHaplotype the reference haplotype
 */
private <T extends BaseVertex, E extends BaseEdge> void sanityCheckGraph(final BaseGraph<T,E> graph, final Haplotype refHaplotype) {
    sanityCheckReferenceGraph(graph, refHaplotype);
}
/**
 * Make sure the reference sequence is properly represented in the provided graph.
 *
 * Verifies that (1) a reference source vertex exists, (2) a reference sink vertex exists, and
 * (3) walking the reference path from source to sink reproduces the reference haplotype's
 * bases exactly.
 *
 * @param graph the graph to check
 * @param refHaplotype the reference haplotype
 * @throws IllegalStateException if any of the invariants above is violated
 */
private <T extends BaseVertex, E extends BaseEdge> void sanityCheckReferenceGraph(final BaseGraph<T,E> graph, final Haplotype refHaplotype) {
    if( graph.getReferenceSourceVertex() == null ) {
        throw new IllegalStateException("All reference graphs must have a reference source vertex.");
    }
    if( graph.getReferenceSinkVertex() == null ) {
        throw new IllegalStateException("All reference graphs must have a reference sink vertex.");
    }
    if( !Arrays.equals(graph.getReferenceBytes(graph.getReferenceSourceVertex(), graph.getReferenceSinkVertex(), true, true), refHaplotype.getBases()) ) {
        throw new IllegalStateException("Mismatch between the reference haplotype and the reference assembly graph path. for graph " + graph +
                " graph = " + new String(graph.getReferenceBytes(graph.getReferenceSourceVertex(), graph.getReferenceSinkVertex(), true, true)) +
                " haplotype = " + new String(refHaplotype.getBases())
        );
    }
}
/**
 * Dump the assembled graphs to the graphWriter stream as a single dot digraph.
 *
 * In debugGraphTransformations mode only the first graph whose kmer size is below the
 * threshold is written; larger graphs are skipped with a log message.
 *
 * @param graphs a non-null list of graphs to print out
 */
private void printGraphs(final List<SeqGraph> graphs) {
    final int maxKmerSizeToWriteInDebugMode = 50;
    graphWriter.println("digraph assemblyGraphs {");
    for ( final SeqGraph graph : graphs ) {
        final boolean skipInDebugMode = debugGraphTransformations && graph.getKmerSize() >= maxKmerSizeToWriteInDebugMode;
        if ( skipInDebugMode ) {
            logger.info("Skipping writing of graph with kmersize " + graph.getKmerSize());
        } else {
            graph.printGraph(graphWriter, false, pruneFactor);
            if ( debugGraphTransformations )
                break;
        }
    }
    graphWriter.println("}");
}
// -----------------------------------------------------------------------------------------------
//
// getter / setter routines for generic assembler properties
//
// -----------------------------------------------------------------------------------------------
public int getPruneFactor() {
return pruneFactor;
@ -85,10 +402,6 @@ public abstract class LocalAssemblyEngine {
this.errorCorrectKmers = errorCorrectKmers;
}
/** @return the stream assembled graphs are dumped to, or null when graph printing is disabled */
public PrintStream getGraphWriter() {
    return graphWriter;
}

/** Set the stream used to dump assembled graphs in dot format; pass null to disable graph printing. */
public void setGraphWriter(PrintStream graphWriter) {
    this.graphWriter = graphWriter;
}
@ -101,5 +414,35 @@ public abstract class LocalAssemblyEngine {
this.minBaseQualityToUseInAssembly = minBaseQualityToUseInAssembly;
}
public abstract List<Haplotype> runLocalAssembly(ActiveRegion activeRegion, Haplotype refHaplotype, byte[] fullReferenceWithPadding, GenomeLoc refLoc, List<VariantContext> activeAllelesToGenotype);
/** @return true if verbose debugging output is enabled */
public boolean isDebug() {
    return debug;
}

/** Enable or disable verbose debugging output during assembly. */
public void setDebug(boolean debug) {
    this.debug = debug;
}

/** @return true if the path finder may traverse cycles in the kmer graph when generating haplotype paths */
public boolean isAllowCyclesInKmerGraphToGeneratePaths() {
    return allowCyclesInKmerGraphToGeneratePaths;
}

/** Allow or forbid cycle traversal in the kmer graph during haplotype path generation. */
public void setAllowCyclesInKmerGraphToGeneratePaths(boolean allowCyclesInKmerGraphToGeneratePaths) {
    this.allowCyclesInKmerGraphToGeneratePaths = allowCyclesInKmerGraphToGeneratePaths;
}

/** @return true if graphs are dumped to dot files at each transformation step */
public boolean isDebugGraphTransformations() {
    return debugGraphTransformations;
}

/** Enable or disable per-transformation-step graph dumps. */
public void setDebugGraphTransformations(boolean debugGraphTransformations) {
    this.debugGraphTransformations = debugGraphTransformations;
}

/** @return true if dangling tails should be recovered during assembly */
public boolean isRecoverDanglingTails() {
    return recoverDanglingTails;
}

/** Enable or disable recovery of dangling tails during assembly. */
public void setRecoverDanglingTails(boolean recoverDanglingTails) {
    this.recoverDanglingTails = recoverDanglingTails;
}
}

View File

@ -0,0 +1,526 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Requires;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import java.util.*;
/**
* Utility class that error-corrects reads.
* Main idea: An error in a read will appear as a bubble in a k-mer (de Bruijn) graph and such bubble will have very low multiplicity.
* Hence, read errors will appear as "sparse" kmers with very little support.
* Historically, the most common approach to error-correct reads before assembly has been to first compute the kmer spectrum of the reads,
* defined as the kmer composition of a set of reads along with the multiplicity of each kmer.
* First-generation correctors like the Euler corrector (Pevzner 2001) mapped low frequency kmers (kmers appearing say below N times)
* into high frequency ones that lied within a certain Hamming or edit distance.
* This is doable, but has some drawbacks:
* - Kmers used for error correction become tied to kmers used for graph building.
 * - Hence, large kmers (desirable for graph building because they can resolve repeats better) are a hindrance for error correction,
* because they are seen less often.
* - After error correction, there is no guarantee that a sequence of kmers corresponds to an "actual" read.
*
 * An error-corrected set of reads also makes a much smoother graph without the need to resolve so many bubbles.
*
* Idea hence is to correct reads based on their kmer content, but in a context independent from graph building.
* In order to do this, the following steps are taken:
 * - The k-mer spectrum of a set of reads is computed. However, we are at freedom to choose the most convenient k-mer size (typically around
* read length /2).
 * - We partition the set of observed k-mers into "solid" kmers which have multiplicity > M, and "unsolid" ones otherwise (Pevzner 2001).
*
* - Main idea of the algorithm is to try to substitute a sequence of bases in a read by a sequence better supported by kmers.
* - For each "unsolid" kmer observed in reads, we try to find a "solid" kmer within a maximum Hamming distance.
* - If such solid kmer exists, then this unsolid kmer is "correctable", otherwise, uncorrectable.
* - For each read, then:
* -- Walk through read and visit all kmers.
* -- If kmer is solid, continue to next kmer.
* -- If not, and if it's correctable (i.e. there exists a mapping from an unsolid kmer to a solid kmer within a given Hamming distance),
* add the bases and offsets corresponding to differing positions between unsolid and solid kmer to correction list.
* -- At the end, each base in read will have a list of corrections associated with it. We can then choose to correct or not.
* If read has only consistent corrections, then we can correct base to common base in corrections.
*
* TODO:
* todo Q: WHAT QUALITY TO USE??
* todo how do we deal with mate pairs?
*
*
*/
public class ReadErrorCorrector {
    private final static Logger logger = Logger.getLogger(ReadErrorCorrector.class);

    // Number of observations of each kmer, accumulated over all reads passed to addReadKmers()
    KMerCounter countsByKMer;
    // Maps each observed kmer to the kmer it should be corrected to (solid kmers map to themselves)
    Map<Kmer,Kmer> kmerCorrectionMap = new HashMap<>();
    // For each correctable kmer, the in-kmer offsets and replacement bases that differ from its
    // chosen solid kmer. Solid kmers map to a pair of empty arrays.
    Map<Kmer,Pair<int[],byte[]>> kmerDifferingBases = new HashMap<>();

    private final int kmerLength;
    private final boolean debug;
    private final boolean trimLowQualityBases;
    private final byte minTailQuality;
    private final int maxMismatchesToCorrect;
    private final byte qualityOfCorrectedBases;
    private final int maxObservationsForKmerToBeCorrectable;
    private final int maxHomopolymerLengthInRegion;
    private final int minObservationsForKmerToBeSolid;

    // default values, for debugging
    private final static boolean doInplaceErrorCorrection = false; // currently not used, since we want corrected reads to be used only for assembly
    private final static int MAX_MISMATCHES_TO_CORRECT = 2;
    private final static byte QUALITY_OF_CORRECTED_BASES = 30; // what's a reasonable value here?
    private final static int MAX_OBSERVATIONS_FOR_KMER_TO_BE_CORRECTABLE = 1;
    private final static boolean TRIM_LOW_QUAL_TAILS = false;
    private final static boolean DONT_CORRECT_IN_LONG_HOMOPOLYMERS = false;
    private final static int MAX_HOMOPOLYMER_THRESHOLD = 12;

    // debug counter structure
    private final ReadErrorCorrectionStats readErrorCorrectionStats = new ReadErrorCorrectionStats();

    /**
     * Create a new kmer corrector
     *
     * @param kmerLength the length of kmers we'll be counting to error correct, must be >= 1
     * @param maxMismatchesToCorrect maximum Hamming distance between an unsolid kmer and the solid kmer used to correct it, must be >= 1
     * @param maxObservationsForKmerToBeCorrectable kmers observed at most this many times are candidates for correction
     * @param qualityOfCorrectedBases Bases to be corrected will be assigned this quality, must be in [2, MAX_REASONABLE_Q_SCORE]
     * @param minObservationsForKmerToBeSolid kmers observed at least this many times are considered solid (trusted)
     * @param trimLowQualityBases if true, hard-clip low-quality tails from corrected reads
     * @param minTailQuality minimum tail quality used when trimming low-quality tails
     * @param debug output debug information
     * @param fullReferenceWithPadding reference context of the active region, used to measure homopolymer content
     */
    public ReadErrorCorrector(final int kmerLength,
                              final int maxMismatchesToCorrect,
                              final int maxObservationsForKmerToBeCorrectable,
                              final byte qualityOfCorrectedBases,
                              final int minObservationsForKmerToBeSolid,
                              final boolean trimLowQualityBases,
                              final byte minTailQuality,
                              final boolean debug,
                              final byte[] fullReferenceWithPadding) {
        if ( kmerLength < 1 ) throw new IllegalArgumentException("kmerLength must be > 0 but got " + kmerLength);
        if ( maxMismatchesToCorrect < 1 )
            throw new IllegalArgumentException("maxMismatchesToCorrect must be >= 1 but got " + maxMismatchesToCorrect);
        if ( qualityOfCorrectedBases < 2 || qualityOfCorrectedBases > QualityUtils.MAX_REASONABLE_Q_SCORE)
            throw new IllegalArgumentException("qualityOfCorrectedBases must be >= 2 and <= MAX_REASONABLE_Q_SCORE but got " + qualityOfCorrectedBases);
        countsByKMer = new KMerCounter(kmerLength);
        this.kmerLength = kmerLength;
        this.maxMismatchesToCorrect = maxMismatchesToCorrect;
        this.qualityOfCorrectedBases = qualityOfCorrectedBases;
        this.minObservationsForKmerToBeSolid = minObservationsForKmerToBeSolid;
        this.trimLowQualityBases = trimLowQualityBases;
        this.minTailQuality = minTailQuality;
        this.debug = debug;
        this.maxObservationsForKmerToBeCorrectable = maxObservationsForKmerToBeCorrectable;
        // when region has long homopolymers, we may want not to correct reads, since assessment is complicated,
        // so we may decide to skip error correction in these regions
        maxHomopolymerLengthInRegion = computeMaxHLen(fullReferenceWithPadding);
    }

    /**
     * Simple constructor with sensible defaults
     * @param kmerLength K-mer length for error correction (not necessarily the same as for assembly graph)
     * @param minTailQuality Minimum tail quality: remaining bases with Q's below this value are hard-clipped after correction
     * @param minObservationsForKmerToBeSolid kmers observed at least this many times are considered solid (trusted)
     * @param debug Output debug information
     * @param fullReferenceWithPadding reference context of the active region
     */
    public ReadErrorCorrector(final int kmerLength, final byte minTailQuality, final int minObservationsForKmerToBeSolid, final boolean debug,final byte[] fullReferenceWithPadding) {
        this(kmerLength, MAX_MISMATCHES_TO_CORRECT, MAX_OBSERVATIONS_FOR_KMER_TO_BE_CORRECTABLE, QUALITY_OF_CORRECTED_BASES, minObservationsForKmerToBeSolid, TRIM_LOW_QUAL_TAILS, minTailQuality, debug,fullReferenceWithPadding);
    }

    /**
     * Main entry routine to add all kmers in a read to the read map counter
     * @param read Read to add bases
     */
    @Requires("read != null")
    protected void addReadKmers(final GATKSAMRecord read) {
        // skip kmer counting entirely when the region is homopolymer-rich and that mode is enabled
        if (DONT_CORRECT_IN_LONG_HOMOPOLYMERS && maxHomopolymerLengthInRegion > MAX_HOMOPOLYMER_THRESHOLD)
            return;

        final byte[] readBases = read.getReadBases();
        for (int offset = 0; offset <= readBases.length-kmerLength; offset++ ) {
            countsByKMer.addKmer(new Kmer(readBases,offset,kmerLength),1);
        }
    }

    /**
     * Correct a collection of reads based on stored k-mer counts
     * @param reads Reads to correct; input reads are not modified
     * @return a new list holding, for each input read, either a corrected copy or the original read
     */
    public final List<GATKSAMRecord> correctReads(final Collection<GATKSAMRecord> reads) {
        final List<GATKSAMRecord> correctedReads = new ArrayList<>(reads.size());
        if (DONT_CORRECT_IN_LONG_HOMOPOLYMERS && maxHomopolymerLengthInRegion > MAX_HOMOPOLYMER_THRESHOLD) {
            // just copy reads into output and exit
            correctedReads.addAll(reads);
        }
        else {
            computeKmerCorrectionMap();
            for (final GATKSAMRecord read: reads) {
                final GATKSAMRecord correctedRead = correctRead(read);
                if (trimLowQualityBases)
                    correctedReads.add(ReadClipper.hardClipLowQualEnds(correctedRead, minTailQuality));
                else
                    correctedReads.add(correctedRead);
            }
            if (debug) {
                logger.info("Number of corrected bases:"+readErrorCorrectionStats.numBasesCorrected);
                logger.info("Number of corrected reads:"+readErrorCorrectionStats.numReadsCorrected);
                logger.info("Number of skipped reads:"+readErrorCorrectionStats.numReadsUncorrected);
                logger.info("Number of solid kmers:"+readErrorCorrectionStats.numSolidKmers);
                logger.info("Number of corrected kmers:"+readErrorCorrectionStats.numCorrectedKmers);
                logger.info("Number of uncorrectable kmers:"+readErrorCorrectionStats.numUncorrectableKmers);
            }
        }
        return correctedReads;
    }

    /**
     * Do actual read correction based on k-mer map. First, loop through stored k-mers to get a list of possible corrections
     * for each position in the read. Then correct read based on all possible consistent corrections.
     * @param inputRead Read to correct
     * @return Corrected read (can be same reference as input if doInplaceErrorCorrection is set)
     */
    @Requires("inputRead != null")
    private GATKSAMRecord correctRead(final GATKSAMRecord inputRead) {
        // no support for reduced reads (which shouldn't need to be error-corrected anyway!)
        if (inputRead.isReducedRead())
            return inputRead;

        // do actual correction
        boolean corrected = false;
        // clone: SAMRecord accessors return the internal arrays, and we must not mutate
        // the input read unless doInplaceErrorCorrection is set
        final byte[] correctedBases = inputRead.getReadBases().clone();
        final byte[] correctedQuals = inputRead.getBaseQualities().clone();

        // array to store list of possible corrections for read
        final CorrectionSet correctionSet = buildCorrectionMap(correctedBases);
        for (int offset = 0; offset < correctedBases.length; offset++) {
            final Byte b = correctionSet.getConsensusCorrection(offset);
            if (b != null && b != correctedBases[offset]) {
                correctedBases[offset] = b;
                correctedQuals[offset] = qualityOfCorrectedBases;
                corrected = true;
                readErrorCorrectionStats.numBasesCorrected++; // count only bases actually changed
            }
        }
        if (corrected) {
            readErrorCorrectionStats.numReadsCorrected++;
            if (doInplaceErrorCorrection) {
                inputRead.setReadBases(correctedBases);
                inputRead.setBaseQualities(correctedQuals);
                return inputRead;
            }
            else {
                // build a corrected copy, leaving the input read untouched
                // todo - do we need to clone anything else from read?
                final GATKSAMRecord correctedRead = new GATKSAMRecord(inputRead);
                correctedRead.setReadBases(correctedBases);
                correctedRead.setBaseQualities(correctedQuals);
                correctedRead.setIsStrandless(inputRead.isStrandless());
                correctedRead.setReadGroup(inputRead.getReadGroup());
                return correctedRead;
            }
        }
        else {
            readErrorCorrectionStats.numReadsUncorrected++;
            return inputRead;
        }
    }

    /**
     * Build correction map for each of the bases in read.
     * For each of the constituent kmers in read:
     * a) See whether the kmer has been mapped to a corrected kmer.
     * b) If so, get list of differing positions and corresponding bases.
     * c) Add then list of new bases to index in correction list.
     * Correction list is of read size, and holds a list of bases to correct.
     * @param correctedBases Bases to attempt to correct
     * @return CorrectionSet object.
     */
    @Requires("correctedBases != null")
    private CorrectionSet buildCorrectionMap(final byte[] correctedBases) {
        // array to store list of possible corrections for read
        final CorrectionSet correctionSet = new CorrectionSet(correctedBases.length);

        for (int offset = 0; offset <= correctedBases.length-kmerLength; offset++ ) {
            final Kmer kmer = new Kmer(correctedBases,offset,kmerLength);
            final Kmer newKmer = kmerCorrectionMap.get(kmer);
            if (newKmer != null && !newKmer.equals(kmer)){
                final Pair<int[],byte[]> differingPositions = kmerDifferingBases.get(kmer);
                final int[] differingIndeces = differingPositions.first;
                final byte[] differingBases = differingPositions.second;
                for (int k=0; k < differingIndeces.length; k++) {
                    // get list of differing positions for corrected kmer
                    // for each of these, add correction candidate to correction set
                    correctionSet.add(offset + differingIndeces[k],differingBases[k]);
                }
            }
        }
        return correctionSet;
    }

    /**
     * Top-level entry point that adds a collection of reads to our kmer list.
     * For each read in list, its constituent kmers will be logged in our kmer table.
     * @param reads Reads whose kmers should be counted
     */
    @Requires("reads != null")
    public void addReadsToKmers(final Collection<GATKSAMRecord> reads) {
        for (final GATKSAMRecord read: reads)
            addReadKmers(read);

        if (debug)
            for ( final KMerCounter.CountedKmer countedKmer: countsByKMer.getCountedKmers() )
                logger.info(String.format("%s\t%d\n", countedKmer.kmer, countedKmer.count));
    }

    /**
     * For each kmer we've seen, do the following:
     * a) If kmer count > threshold1, this kmer is good, so correction map will be to itself.
     * b) If kmer count <= threshold2, this kmer is bad.
     * In that case, loop through all other kmers. If kmer is good, compute distance, and get minimal distance.
     * If such distance is < some threshold, map to this kmer, and record differing positions and bases.
     *
     */
    private void computeKmerCorrectionMap() {
        for (final KMerCounter.CountedKmer storedKmer : countsByKMer.getCountedKmers()) {
            if (storedKmer.getCount() >= minObservationsForKmerToBeSolid) {
                // this kmer is good: map to itself
                kmerCorrectionMap.put(storedKmer.getKmer(),storedKmer.getKmer());
                kmerDifferingBases.put(storedKmer.getKmer(),new Pair<>(new int[0],new byte[0])); // dummy empty array
                readErrorCorrectionStats.numSolidKmers++;
            }
            else if (storedKmer.getCount() <= maxObservationsForKmerToBeCorrectable) {
                // loop now thru all other kmers to find nearest neighbor
                final Pair<Kmer,Pair<int[],byte[]>> nearestNeighbor = findNearestNeighbor(storedKmer.getKmer(),countsByKMer,maxMismatchesToCorrect);

                // check if nearest neighbor lies in a close vicinity. If so, log the new bases and the correction map
                if (nearestNeighbor != null) { // ok, found close neighbor
                    kmerCorrectionMap.put(storedKmer.getKmer(), nearestNeighbor.first);
                    kmerDifferingBases.put(storedKmer.getKmer(), nearestNeighbor.second);
                    readErrorCorrectionStats.numCorrectedKmers++;
                }
                else
                    readErrorCorrectionStats.numUncorrectableKmers++;
            }
        }
    }

    /**
     * Finds nearest neighbor of a given k-mer, among a list of counted K-mers, up to a given distance.
     * If many k-mers share same closest distance, an arbitrary k-mer is picked
     * @param kmer K-mer of interest
     * @param countsByKMer KMerCounter storing set of counted k-mers (may include kmer of interest)
     * @param maxDistance Maximum distance to search
     * @return Pair of values: closest K-mer in Hamming distance and list of differing bases
     *         (arrays sized to the actual number of differing positions).
     *         If no neighbor can be found up to given distance, returns null
     */
    @Requires({"kmer != null", "countsByKMer != null","maxDistance >= 1"})
    private Pair<Kmer,Pair<int[],byte[]>> findNearestNeighbor(final Kmer kmer,
                                                              final KMerCounter countsByKMer,
                                                              final int maxDistance) {
        int minimumDistance = Integer.MAX_VALUE;
        Kmer closestKmer = null;

        // scratch buffers reused across candidates; only the first hammingDistance
        // entries are meaningful after each call to getDifferingPositions()
        final int[] differingIndeces = new int[maxDistance+1];
        final byte[] differingBases = new byte[maxDistance+1];

        int[] closestDifferingIndices = null;
        byte[] closestDifferingBases = null;

        for (final KMerCounter.CountedKmer candidateKmer : countsByKMer.getCountedKmers()) {
            // skip if candidate set includes test kmer
            if (candidateKmer.getKmer().equals(kmer))
                continue;

            final int hammingDistance = kmer.getDifferingPositions(candidateKmer.getKmer(), maxDistance, differingIndeces, differingBases);
            if (hammingDistance < 0) // can't compare kmer? skip
                continue;

            if (hammingDistance < minimumDistance) {
                minimumDistance = hammingDistance;
                closestKmer = candidateKmer.getKmer();
                // trim to the actual number of differing positions so no stale
                // scratch-buffer entries leak into the correction map
                closestDifferingIndices = Arrays.copyOf(differingIndeces, hammingDistance);
                closestDifferingBases = Arrays.copyOf(differingBases, hammingDistance);
            }
        }
        // honor the documented contract: null when no neighbor lies within maxDistance
        if (closestKmer == null)
            return null;
        return new Pair<>(closestKmer, new Pair<>(closestDifferingIndices,closestDifferingBases));
    }

    /**
     * experimental function to compute max homopolymer length in a given reference context
     * @param fullReferenceWithPadding Reference context of interest
     * @return Max homopolymer length in region (>= 1 for any non-empty input)
     */
    @Requires("fullReferenceWithPadding != null")
    private static int computeMaxHLen(final byte[] fullReferenceWithPadding) {
        int leftRun = 1;
        int maxRun = 1;
        for ( int i = 1; i < fullReferenceWithPadding.length; i++) {
            if ( fullReferenceWithPadding[i] == fullReferenceWithPadding[i-1] )
                leftRun++;
            else
                leftRun = 1;
            // update inside the loop so interior runs are counted, not just the final one
            if (leftRun > maxRun)
                maxRun = leftRun;
        }
        return maxRun;
    }

    // simple debug counters accumulated during correction
    private static final class ReadErrorCorrectionStats {
        public int numReadsCorrected;
        public int numReadsUncorrected;
        public int numBasesCorrected;
        public int numSolidKmers;
        public int numUncorrectableKmers;
        public int numCorrectedKmers;
    }

    /**
     * Wrapper utility class that holds, for each position in read, a list of bytes representing candidate corrections.
     * So, a read ACAGT where the middle A has found to be errorful might look like:
     * 0: {}
     * 1: {}
     * 2: {'C','C','C'}
     * 3: {}
     * 4: {}
     *
     * It's up to the method getConsensusCorrection() to decide how to use the correction sets for each position.
     * By default, only strict consensus is allowed right now.
     *
     */
    protected static class CorrectionSet {
        private final int size;
        private ArrayList<List<Byte>> corrections;

        /**
         * Main class constructor.
         * @param size Size of correction set, needs to be set equal to the read being corrected
         */
        public CorrectionSet(final int size) {
            this.size = size;
            corrections = new ArrayList<>(size);
            for (int k=0; k < size; k++)
                corrections.add(k,new ArrayList<Byte>());
        }

        /**
         * Add a base to this correction set at a particular offset, measured from the start of the read
         * @param offset Offset from start of read
         * @param base base to be added to list of corrections at this offset
         */
        public void add(final int offset, final byte base) {
            if (offset >= size || offset < 0)
                throw new IllegalStateException("Bad entry into CorrectionSet: offset must be >= 0 and < size");
            if (!BaseUtils.isRegularBase(base))
                return; // no irregular base correction

            final List<Byte> storedBytes = corrections.get(offset);
            storedBytes.add(base);
        }

        /**
         * Get list of corrections for a particular offset
         * @param offset Offset of interest
         * @return List of bases representing possible corrections at this offset
         */
        public List<Byte> get(final int offset) {
            if (offset >= size || offset < 0)
                throw new IllegalArgumentException("Illegal call of CorrectionSet.get(): offset must be < size");
            return corrections.get(offset);
        }

        /**
         * Get consensus correction for a particular offset. In this implementation, it just boils down to seeing if
         * byte list associated with offset has identical values. If so, return this base, otherwise return null.
         * This method does not modify the stored correction lists, so it can be called repeatedly.
         * @param offset Offset of interest
         * @return Consensus base, or null if no consensus possible.
         */
        public Byte getConsensusCorrection(final int offset) {
            if (offset >= size || offset < 0)
                throw new IllegalArgumentException("Illegal call of CorrectionSet.getConsensusCorrection(): offset must be < size");
            final List<Byte> storedBytes = corrections.get(offset);
            if (storedBytes.isEmpty())
                return null;

            // read (don't remove) the last element so repeated calls are idempotent
            final byte lastBase = storedBytes.get(storedBytes.size()-1);
            for (final Byte b: storedBytes) {
                // strict correction rule: all bases must match
                if (b != lastBase)
                    return null;
            }
            // all bytes then are equal:
            return lastBase;
        }
    }
}

View File

@ -76,12 +76,10 @@ public class BaseEdge {
}
/**
* Copy constructor
*
* @param toCopy
* Create a new copy of this BaseEdge
*/
public BaseEdge(final BaseEdge toCopy) {
this(toCopy.isRef(), toCopy.getMultiplicity());
public BaseEdge copy() {
return new BaseEdge(isRef(), getMultiplicity());
}
/**
@ -92,6 +90,34 @@ public class BaseEdge {
return multiplicity;
}
/**
* Get the DOT format label for this edge, to be displayed when printing this edge to a DOT file
* @return a non-null string
*/
public String getDotLabel() {
return Integer.toString(getMultiplicity());
}
/**
* Increase the multiplicity of this edge by incr
* @param incr the change in this multiplicity, must be >= 0
*/
public void incMultiplicity(final int incr) {
if ( incr < 0 ) throw new IllegalArgumentException("incr must be >= 0 but got " + incr);
multiplicity += incr;
}
/**
* A special assessor that returns the multiplicity that should be used by pruning algorithm
*
* Can be overloaded by subclasses
*
* @return the multiplicity value that should be used for pruning
*/
public int getPruningMultiplicity() {
return getMultiplicity();
}
/**
* Set the multiplicity of this edge to value
* @param value an integer >= 0
@ -117,23 +143,6 @@ public class BaseEdge {
this.isRef = isRef;
}
/**
* Does this and edge have the same source and target vertices in graph?
*
* @param graph the graph containing both this and edge
* @param edge our comparator edge
* @param <T>
* @return true if we have the same source and target vertices
*/
public <T extends BaseVertex> boolean hasSameSourceAndTarget(final BaseGraph<T> graph, final BaseEdge edge) {
return (graph.getEdgeSource(this).equals(graph.getEdgeSource(edge))) && (graph.getEdgeTarget(this).equals(graph.getEdgeTarget(edge)));
}
// For use when comparing edges across graphs!
public <T extends BaseVertex> boolean seqEquals( final BaseGraph<T> graph, final BaseEdge edge, final BaseGraph<T> graph2 ) {
return (graph.getEdgeSource(this).seqEquals(graph2.getEdgeSource(edge))) && (graph.getEdgeTarget(this).seqEquals(graph2.getEdgeTarget(edge)));
}
/**
* Sorts a collection of BaseEdges in decreasing order of weight, so that the most
* heavily weighted is at the start of the list
@ -187,4 +196,12 @@ public class BaseEdge {
if ( edge == null ) throw new IllegalArgumentException("edge cannot be null");
return new BaseEdge(isRef() || edge.isRef(), Math.max(getMultiplicity(), edge.getMultiplicity()));
}
@Override
public String toString() {
return "BaseEdge{" +
"multiplicity=" + multiplicity +
", isRef=" + isRef +
'}';
}
}

View File

@ -66,34 +66,16 @@ import java.util.*;
* Date: 2/6/13
*/
@Invariant("!this.isAllowingMultipleEdges()")
public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, BaseEdge> {
public class BaseGraph<V extends BaseVertex, E extends BaseEdge> extends DefaultDirectedGraph<V, E> {
protected final static Logger logger = Logger.getLogger(BaseGraph.class);
private final int kmerSize;
/**
* Construct an empty BaseGraph
*/
public BaseGraph() {
this(11);
}
/**
* Edge factory that creates non-reference multiplicity 1 edges
* @param <T> the new of our vertices
*/
private static class MyEdgeFactory<T extends BaseVertex> implements EdgeFactory<T, BaseEdge> {
@Override
public BaseEdge createEdge(T sourceVertex, T targetVertex) {
return new BaseEdge(false, 1);
}
}
/**
* Construct a DeBruijnGraph with kmerSize
* @param kmerSize
*/
public BaseGraph(final int kmerSize) {
super(new MyEdgeFactory<T>());
public BaseGraph(final int kmerSize, final EdgeFactory<V,E> edgeFactory) {
super(edgeFactory);
if ( kmerSize < 1 ) throw new IllegalArgumentException("kmerSize must be >= 1 but got " + kmerSize);
this.kmerSize = kmerSize;
@ -111,7 +93,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the vertex to test
* @return true if this vertex is a reference node (meaning that it appears on the reference path in the graph)
*/
public boolean isReferenceNode( final T v ) {
public boolean isReferenceNode( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to test a null vertex."); }
for( final BaseEdge e : edgesOf(v) ) {
if( e.isRef() ) { return true; }
@ -123,7 +105,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the vertex to test
* @return true if this vertex is a source node (in degree == 0)
*/
public boolean isSource( final T v ) {
public boolean isSource( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to test a null vertex."); }
return inDegreeOf(v) == 0;
}
@ -132,7 +114,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the vertex to test
* @return true if this vertex is a sink node (out degree == 0)
*/
public boolean isSink( final T v ) {
public boolean isSink( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to test a null vertex."); }
return outDegreeOf(v) == 0;
}
@ -141,9 +123,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* Get the set of source vertices of this graph
* @return a non-null set
*/
public Set<T> getSources() {
final Set<T> set = new LinkedHashSet<T>();
for ( final T v : vertexSet() )
public Set<V> getSources() {
final Set<V> set = new LinkedHashSet<V>();
for ( final V v : vertexSet() )
if ( isSource(v) )
set.add(v);
return set;
@ -153,9 +135,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* Get the set of sink vertices of this graph
* @return a non-null set
*/
public Set<T> getSinks() {
final Set<T> set = new LinkedHashSet<T>();
for ( final T v : vertexSet() )
public Set<V> getSinks() {
final Set<V> set = new LinkedHashSet<V>();
for ( final V v : vertexSet() )
if ( isSink(v) )
set.add(v);
return set;
@ -167,7 +149,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @return non-null byte array
*/
@Ensures({"result != null"})
public byte[] getAdditionalSequence( final T v ) {
public byte[] getAdditionalSequence( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to pull sequence from a null vertex."); }
return v.getAdditionalSequence(isSource(v));
}
@ -176,9 +158,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param e the edge to test
* @return true if this edge is a reference source edge
*/
public boolean isRefSource( final BaseEdge e ) {
public boolean isRefSource( final E e ) {
if( e == null ) { throw new IllegalArgumentException("Attempting to test a null edge."); }
for( final BaseEdge edgeToTest : incomingEdgesOf(getEdgeSource(e)) ) {
for( final E edgeToTest : incomingEdgesOf(getEdgeSource(e)) ) {
if( edgeToTest.isRef() ) { return false; }
}
return true;
@ -188,9 +170,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the vertex to test
* @return true if this vertex is a reference source
*/
public boolean isRefSource( final T v ) {
public boolean isRefSource( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to test a null vertex."); }
for( final BaseEdge edgeToTest : incomingEdgesOf(v) ) {
for( final E edgeToTest : incomingEdgesOf(v) ) {
if( edgeToTest.isRef() ) { return false; }
}
return true;
@ -200,31 +182,41 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param e the edge to test
* @return true if this edge is a reference sink edge
*/
public boolean isRefSink( final BaseEdge e ) {
public boolean isRefSink( final E e ) {
if( e == null ) { throw new IllegalArgumentException("Attempting to test a null edge."); }
for( final BaseEdge edgeToTest : outgoingEdgesOf(getEdgeTarget(e)) ) {
for( final E edgeToTest : outgoingEdgesOf(getEdgeTarget(e)) ) {
if( edgeToTest.isRef() ) { return false; }
}
return true;
}
/**
* // TODO -- the logic of this test is just wrong
* @param v the vertex to test
* @return true if this vertex is a reference sink
*/
public boolean isRefSink( final T v ) {
public boolean isRefSink( final V v ) {
if( v == null ) { throw new IllegalArgumentException("Attempting to test a null vertex."); }
for( final BaseEdge edgeToTest : outgoingEdgesOf(v) ) {
for( final E edgeToTest : outgoingEdgesOf(v) ) {
if( edgeToTest.isRef() ) { return false; }
}
return true;
}
/**
* Is this both a refsink node and a reference node
* @param v a non-null vertex
* @return true if v is both a sink and a reference node
*/
public boolean isRefNodeAndRefSink(final V v) {
return isRefSink(v) && isReferenceNode(v);
}
/**
* @return the reference source vertex pulled from the graph, can be null if it doesn't exist in the graph
*/
public T getReferenceSourceVertex( ) {
for( final T v : vertexSet() ) {
public V getReferenceSourceVertex( ) {
for( final V v : vertexSet() ) {
if( isReferenceNode(v) && isRefSource(v) ) {
return v;
}
@ -235,8 +227,8 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
/**
* @return the reference sink vertex pulled from the graph, can be null if it doesn't exist in the graph
*/
public T getReferenceSinkVertex( ) {
for( final T v : vertexSet() ) {
public V getReferenceSinkVertex( ) {
for( final V v : vertexSet() ) {
if( isReferenceNode(v) && isRefSink(v) ) {
return v;
}
@ -249,9 +241,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the current vertex, can be null
* @return the next reference vertex if it exists
*/
public T getNextReferenceVertex( final T v ) {
public V getNextReferenceVertex( final V v ) {
if( v == null ) { return null; }
for( final BaseEdge edgeToTest : outgoingEdgesOf(v) ) {
for( final E edgeToTest : outgoingEdgesOf(v) ) {
if( edgeToTest.isRef() ) {
return getEdgeTarget(edgeToTest);
}
@ -264,9 +256,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v the current vertex, can be null
* @return the previous reference vertex if it exists
*/
public T getPrevReferenceVertex( final T v ) {
public V getPrevReferenceVertex( final V v ) {
if( v == null ) { return null; }
for( final BaseEdge edgeToTest : incomingEdgesOf(v) ) {
for( final E edgeToTest : incomingEdgesOf(v) ) {
if( isReferenceNode(getEdgeSource(edgeToTest)) ) {
return getEdgeSource(edgeToTest);
}
@ -280,8 +272,8 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param toVertex to this vertex, can be null
* @return true if a reference path exists in the graph between the two vertices
*/
public boolean referencePathExists(final T fromVertex, final T toVertex) {
T v = fromVertex;
public boolean referencePathExists(final V fromVertex, final V toVertex) {
V v = fromVertex;
if( v == null ) {
return false;
}
@ -306,18 +298,18 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param includeStop should the ending vertex be included in the path
* @return byte[] array holding the reference bases, this can be null if there are no nodes between the starting and ending vertex (insertions for example)
*/
public byte[] getReferenceBytes( final T fromVertex, final T toVertex, final boolean includeStart, final boolean includeStop ) {
public byte[] getReferenceBytes( final V fromVertex, final V toVertex, final boolean includeStart, final boolean includeStop ) {
if( fromVertex == null ) { throw new IllegalArgumentException("Starting vertex in requested path cannot be null."); }
if( toVertex == null ) { throw new IllegalArgumentException("From vertex in requested path cannot be null."); }
byte[] bytes = null;
T v = fromVertex;
V v = fromVertex;
if( includeStart ) {
bytes = ArrayUtils.addAll(bytes, getAdditionalSequence(v));
}
v = getNextReferenceVertex(v); // advance along the reference path
while( v != null && !v.equals(toVertex) ) {
bytes = ArrayUtils.addAll( bytes, getAdditionalSequence(v) );
bytes = ArrayUtils.addAll(bytes, getAdditionalSequence(v));
v = getNextReferenceVertex(v); // advance along the reference path
}
if( includeStop && v != null && v.equals(toVertex)) {
@ -330,8 +322,8 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* Convenience function to add multiple vertices to the graph at once
* @param vertices one or more vertices to add
*/
public void addVertices(final T ... vertices) {
for ( final T v : vertices )
public void addVertices(final V... vertices) {
for ( final V v : vertices )
addVertex(v);
}
@ -339,8 +331,8 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* Convenience function to add multiple vertices to the graph at once
* @param vertices one or more vertices to add
*/
public void addVertices(final Collection<T> vertices) {
for ( final T v : vertices )
public void addVertices(final Collection<V> vertices) {
for ( final V v : vertices )
addVertex(v);
}
@ -349,8 +341,12 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param start the first vertex to connect
* @param remaining all additional vertices to connect
*/
public void addEdges(final T start, final T ... remaining) {
addEdges(new BaseEdge(false, 1), start, remaining);
public void addEdges(final V start, final V... remaining) {
V prev = start;
for ( final V next : remaining ) {
addEdge(prev, next);
prev = next;
}
}
/**
@ -358,10 +354,10 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param start the first vertex to connect
* @param remaining all additional vertices to connect
*/
public void addEdges(final BaseEdge template, final T start, final T ... remaining) {
T prev = start;
for ( final T next : remaining ) {
addEdge(prev, next, new BaseEdge(template));
public void addEdges(final E template, final V start, final V... remaining) {
V prev = start;
for ( final V next : remaining ) {
addEdge(prev, next, (E)(template.copy())); // TODO -- is there a better way to do this?
prev = next;
}
}
@ -371,9 +367,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v a non-null vertex
* @return a set of vertices connected by outgoing edges from v
*/
public Set<T> outgoingVerticesOf(final T v) {
final Set<T> s = new LinkedHashSet<T>();
for ( final BaseEdge e : outgoingEdgesOf(v) ) {
public Set<V> outgoingVerticesOf(final V v) {
final Set<V> s = new LinkedHashSet<V>();
for ( final E e : outgoingEdgesOf(v) ) {
s.add(getEdgeTarget(e));
}
return s;
@ -384,14 +380,25 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v a non-null vertex
* @return a set of vertices {X} connected X -> v
*/
public Set<T> incomingVerticesOf(final T v) {
final Set<T> s = new LinkedHashSet<T>();
for ( final BaseEdge e : incomingEdgesOf(v) ) {
public Set<V> incomingVerticesOf(final V v) {
final Set<V> s = new LinkedHashSet<V>();
for ( final E e : incomingEdgesOf(v) ) {
s.add(getEdgeSource(e));
}
return s;
}
/**
* Get the set of vertices connected to v by incoming or outgoing edges
* @param v a non-null vertex
* @return a set of vertices {X} connected X -> v or v -> Y
*/
public Set<V> neighboringVerticesOf(final V v) {
final Set<V> s = incomingVerticesOf(v);
s.addAll(outgoingVerticesOf(v));
return s;
}
/**
* Print out the graph in the dot language for visualization
* @param destination File to write to
@ -413,15 +420,16 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
if ( writeHeader )
graphWriter.println("digraph assemblyGraphs {");
for( final BaseEdge edge : edgeSet() ) {
graphWriter.println("\t" + getEdgeSource(edge).toString() + " -> " + getEdgeTarget(edge).toString() + " [" + (edge.getMultiplicity() > 0 && edge.getMultiplicity() <= pruneFactor ? "style=dotted,color=grey," : "") + "label=\"" + edge.getMultiplicity() + "\"];");
for( final E edge : edgeSet() ) {
graphWriter.println("\t" + getEdgeSource(edge).toString() + " -> " + getEdgeTarget(edge).toString() + " [" + (edge.getMultiplicity() > 0 && edge.getMultiplicity() <= pruneFactor ? "style=dotted,color=grey," : "") + "label=\"" + edge.getDotLabel() + "\"];");
if( edge.isRef() ) {
graphWriter.println("\t" + getEdgeSource(edge).toString() + " -> " + getEdgeTarget(edge).toString() + " [color=red];");
}
}
for( final T v : vertexSet() ) {
graphWriter.println("\t" + v.toString() + " [label=\"" + new String(getAdditionalSequence(v)) + "\",shape=box]");
for( final V v : vertexSet() ) {
// graphWriter.println("\t" + v.toString() + " [label=\"" + v + "\",shape=box]");
graphWriter.println("\t" + v.toString() + " [label=\"" + new String(getAdditionalSequence(v)) + v.additionalInfo() + "\",shape=box]");
}
if ( writeHeader )
@ -439,10 +447,10 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
}
// Remove non-ref edges connected before and after the reference path
final Set<BaseEdge> edgesToCheck = new HashSet<BaseEdge>();
final Set<E> edgesToCheck = new HashSet<E>();
edgesToCheck.addAll(incomingEdgesOf(getReferenceSourceVertex()));
while( !edgesToCheck.isEmpty() ) {
final BaseEdge e = edgesToCheck.iterator().next();
final E e = edgesToCheck.iterator().next();
if( !e.isRef() ) {
edgesToCheck.addAll( incomingEdgesOf(getEdgeSource(e)) );
removeEdge(e);
@ -452,7 +460,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
edgesToCheck.addAll(outgoingEdgesOf(getReferenceSinkVertex()));
while( !edgesToCheck.isEmpty() ) {
final BaseEdge e = edgesToCheck.iterator().next();
final E e = edgesToCheck.iterator().next();
if( !e.isRef() ) {
edgesToCheck.addAll( outgoingEdgesOf(getEdgeTarget(e)) );
removeEdge(e);
@ -469,9 +477,9 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param pruneFactor all edges with multiplicity <= this factor that aren't ref edges will be removed
*/
public void pruneGraph( final int pruneFactor ) {
final List<BaseEdge> edgesToRemove = new ArrayList<BaseEdge>();
for( final BaseEdge e : edgeSet() ) {
if( e.getMultiplicity() <= pruneFactor && !e.isRef() ) { // remove non-reference edges with weight less than or equal to the pruning factor
final List<E> edgesToRemove = new ArrayList<>();
for( final E e : edgeSet() ) {
if( e.getPruningMultiplicity() <= pruneFactor && !e.isRef() ) { // remove non-reference edges with weight less than or equal to the pruning factor
edgesToRemove.add(e);
}
}
@ -480,13 +488,25 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
removeSingletonOrphanVertices();
}
/**
* Prune all chains from this graph where all edges in the path have multiplicity <= pruneFactor
*
* @see LowWeightChainPruner for more information
*
* @param pruneFactor all edges with multiplicity <= this factor that aren't ref edges will be removed
*/
public void pruneLowWeightChains( final int pruneFactor ) {
final LowWeightChainPruner<V,E> pruner = new LowWeightChainPruner<>(pruneFactor);
pruner.pruneLowWeightChains(this);
}
/**
* Remove all vertices in the graph that have in and out degree of 0
*/
protected void removeSingletonOrphanVertices() {
// Run through the graph and clean up singular orphaned nodes
final List<T> verticesToRemove = new LinkedList<T>();
for( final T v : vertexSet() ) {
final List<V> verticesToRemove = new LinkedList<>();
for( final V v : vertexSet() ) {
if( inDegreeOf(v) == 0 && outDegreeOf(v) == 0 ) {
verticesToRemove.add(v);
}
@ -499,11 +519,11 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* regardless of its direction, from the reference source vertex
*/
public void removeVerticesNotConnectedToRefRegardlessOfEdgeDirection() {
final HashSet<T> toRemove = new HashSet<T>(vertexSet());
final HashSet<V> toRemove = new HashSet<>(vertexSet());
final T refV = getReferenceSourceVertex();
final V refV = getReferenceSourceVertex();
if ( refV != null ) {
for ( final T v : new BaseGraphIterator<T>(this, refV, true, true) ) {
for ( final V v : new BaseGraphIterator<>(this, refV, true, true) ) {
toRemove.remove(v);
}
}
@ -524,22 +544,31 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
}
// get the set of vertices we can reach by going forward from the ref source
final Set<T> onPathFromRefSource = new HashSet<T>(vertexSet().size());
for ( final T v : new BaseGraphIterator<T>(this, getReferenceSourceVertex(), false, true) ) {
final Set<V> onPathFromRefSource = new HashSet<>(vertexSet().size());
for ( final V v : new BaseGraphIterator<>(this, getReferenceSourceVertex(), false, true) ) {
onPathFromRefSource.add(v);
}
// get the set of vertices we can reach by going backward from the ref sink
final Set<T> onPathFromRefSink = new HashSet<T>(vertexSet().size());
for ( final T v : new BaseGraphIterator<T>(this, getReferenceSinkVertex(), true, false) ) {
final Set<V> onPathFromRefSink = new HashSet<>(vertexSet().size());
for ( final V v : new BaseGraphIterator<>(this, getReferenceSinkVertex(), true, false) ) {
onPathFromRefSink.add(v);
}
// we want to remove anything that's not in both the sink and source sets
final Set<T> verticesToRemove = new HashSet<T>(vertexSet());
final Set<V> verticesToRemove = new HashSet<>(vertexSet());
onPathFromRefSource.retainAll(onPathFromRefSink);
verticesToRemove.removeAll(onPathFromRefSource);
removeAllVertices(verticesToRemove);
// simple sanity checks that this algorithm is working.
if ( getSinks().size() > 1 ) {
throw new IllegalStateException("Should have eliminated all but the reference sink, but found " + getSinks());
}
if ( getSources().size() > 1 ) {
throw new IllegalStateException("Should have eliminated all but the reference source, but found " + getSources());
}
}
/**
@ -555,11 +584,11 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param <T> the type of the nodes in those graphs
* @return true if g1 and g2 are equals
*/
public static <T extends BaseVertex> boolean graphEquals(final BaseGraph<T> g1, BaseGraph<T> g2) {
public static <T extends BaseVertex, E extends BaseEdge> boolean graphEquals(final BaseGraph<T,E> g1, BaseGraph<T,E> g2) {
final Set<T> vertices1 = g1.vertexSet();
final Set<T> vertices2 = g2.vertexSet();
final Set<BaseEdge> edges1 = g1.edgeSet();
final Set<BaseEdge> edges2 = g2.edgeSet();
final Set<E> edges1 = g1.edgeSet();
final Set<E> edges2 = g2.edgeSet();
if ( vertices1.size() != vertices2.size() || edges1.size() != edges2.size() )
return false;
@ -571,29 +600,35 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
if ( ! found ) return false;
}
for( final BaseEdge e1 : g1.edgeSet() ) {
for( final E e1 : g1.edgeSet() ) {
boolean found = false;
for( BaseEdge e2 : g2.edgeSet() ) {
if( e1.seqEquals(g1, e2, g2) ) { found = true; break; }
for( E e2 : g2.edgeSet() ) {
if( g1.seqEquals(e1, e2, g2) ) { found = true; break; }
}
if( !found ) { return false; }
}
for( final BaseEdge e2 : g2.edgeSet() ) {
for( final E e2 : g2.edgeSet() ) {
boolean found = false;
for( BaseEdge e1 : g1.edgeSet() ) {
if( e2.seqEquals(g2, e1, g1) ) { found = true; break; }
for( E e1 : g1.edgeSet() ) {
if( g2.seqEquals(e2, e1, g1) ) { found = true; break; }
}
if( !found ) { return false; }
}
return true;
}
// For use when comparing edges across graphs!
private boolean seqEquals( final E edge1, final E edge2, final BaseGraph<V,E> graph2 ) {
return (this.getEdgeSource(edge1).seqEquals(graph2.getEdgeSource(edge2))) && (this.getEdgeTarget(edge1).seqEquals(graph2.getEdgeTarget(edge2)));
}
/**
* Get the incoming edge of v. Requires that there be only one such edge or throws an error
* @param v our vertex
* @return the single incoming edge to v, or null if none exists
*/
public BaseEdge incomingEdgeOf(final T v) {
public E incomingEdgeOf(final V v) {
return getSingletonEdge(incomingEdgesOf(v));
}
@ -602,7 +637,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param v our vertex
* @return the single outgoing edge from v, or null if none exists
*/
public BaseEdge outgoingEdgeOf(final T v) {
public E outgoingEdgeOf(final V v) {
return getSingletonEdge(outgoingEdgesOf(v));
}
@ -613,7 +648,7 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @return a edge
*/
@Requires("edges != null")
private BaseEdge getSingletonEdge(final Collection<BaseEdge> edges) {
private E getSingletonEdge(final Collection<E> edges) {
if ( edges.size() > 1 ) throw new IllegalArgumentException("Cannot get a single incoming edge for a vertex with multiple incoming edges " + edges);
return edges.isEmpty() ? null : edges.iterator().next();
}
@ -625,12 +660,87 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
* @param target vertex
* @param e edge to add
*/
public void addOrUpdateEdge(final T source, final T target, final BaseEdge e) {
final BaseEdge prev = getEdge(source, target);
public void addOrUpdateEdge(final V source, final V target, final E e) {
final E prev = getEdge(source, target);
if ( prev != null ) {
prev.add(e);
} else {
addEdge(source, target, e);
}
}
@Override
public String toString() {
return "BaseGraph{" +
"kmerSize=" + kmerSize +
'}';
}
/**
* The base sequence for the given path.
* Note, this assumes that the path does not start with a source node.
*
* @param path the list of vertexes that make up the path
* @return non-null sequence of bases corresponding to the given path
*/
@Ensures({"result != null"})
public byte[] getBasesForPath(final List<? extends DeBruijnVertex> path) {
if ( path == null ) throw new IllegalArgumentException("Path cannot be null");
final StringBuffer sb = new StringBuffer();
for ( final DeBruijnVertex v : path )
sb.append((char)v.getSuffix());
return sb.toString().getBytes();
}
/**
* Get the set of vertices within distance edges of source, regardless of edge direction
*
* @param source the source vertex to consider
* @param distance the distance
* @return a set of vertices within distance of source
*/
protected Set<V> verticesWithinDistance(final V source, final int distance) {
if ( distance == 0 )
return Collections.singleton(source);
final Set<V> found = new HashSet<>();
found.add(source);
for ( final V v : neighboringVerticesOf(source) ) {
found.addAll(verticesWithinDistance(v, distance - 1));
}
return found;
}
/**
* Get a graph containing only the vertices within distance edges of target
* @param target a vertex in graph
* @param distance the max distance
* @return a non-null graph
*/
public BaseGraph<V,E> subsetToNeighbors(final V target, final int distance) {
if ( target == null ) throw new IllegalArgumentException("Target cannot be null");
if ( ! containsVertex(target) ) throw new IllegalArgumentException("Graph doesn't contain vertex " + target);
if ( distance < 0 ) throw new IllegalArgumentException("Distance must be >= 0 but got " + distance);
final Set<V> toKeep = verticesWithinDistance(target, distance);
final Set<V> toRemove = new HashSet<>(vertexSet());
toRemove.removeAll(toKeep);
final BaseGraph<V,E> result = (BaseGraph<V,E>)clone();
result.removeAllVertices(toRemove);
return result;
}
/**
* Get a subgraph of graph that contains only vertices within 10 edges of the ref source vertex
* @return a non-null subgraph of this graph
*/
public BaseGraph<V,E> subsetToRefSource() {
return subsetToNeighbors(getReferenceSourceVertex(), 10);
}
}

View File

@ -60,10 +60,10 @@ import java.util.LinkedList;
* Date: 3/24/13
* Time: 4:41 PM
*/
public class BaseGraphIterator<T extends BaseVertex> implements Iterator<T>, Iterable<T> {
public class BaseGraphIterator<T extends BaseVertex, E extends BaseEdge> implements Iterator<T>, Iterable<T> {
final HashSet<T> visited = new HashSet<T>();
final LinkedList<T> toVisit = new LinkedList<T>();
final BaseGraph<T> graph;
final BaseGraph<T,E> graph;
final boolean followIncomingEdges, followOutgoingEdges;
/**
@ -78,7 +78,7 @@ public class BaseGraphIterator<T extends BaseVertex> implements Iterator<T>, Ite
* traversal? (goes backward through the graph)
* @param followOutgoingEdges should we follow outgoing edges during out traversal?
*/
public BaseGraphIterator(final BaseGraph<T> graph, final T start,
public BaseGraphIterator(final BaseGraph<T,E> graph, final T start,
final boolean followIncomingEdges, final boolean followOutgoingEdges) {
if ( graph == null ) throw new IllegalArgumentException("graph cannot be null");
if ( start == null ) throw new IllegalArgumentException("start cannot be null");

View File

@ -57,6 +57,8 @@ import java.util.Arrays;
* @since 03/2013
*/
public class BaseVertex {
/** placeholder to store additional information for debugging purposes */
String additionalInfo = "";
final byte[] sequence;
private final static int UNASSIGNED_HASHCODE = -1;
int cachedHashCode = UNASSIGNED_HASHCODE;
@ -176,4 +178,18 @@ public class BaseVertex {
public byte[] getAdditionalSequence(final boolean source) {
return getSequence();
}
/**
* Set additional debugging information for this vertex
* @param info
*/
public void setAdditionalInfo(final String info) {
if ( info == null ) throw new IllegalArgumentException("info cannot be null");
additionalInfo = info;
}
/**
* @return the additional information for display about this vertex
*/
public String additionalInfo() { return additionalInfo; }
}

View File

@ -126,10 +126,10 @@ public class CommonSuffixSplitter {
edgesToRemove.add(out);
}
graph.addEdge(suffixV, graph.getEdgeTarget(out), new BaseEdge(out));
graph.addEdge(suffixV, graph.getEdgeTarget(out), out.copy());
for ( final BaseEdge in : graph.incomingEdgesOf(mid) ) {
graph.addEdge(graph.getEdgeSource(in), incomingTarget, new BaseEdge(in));
graph.addEdge(graph.getEdgeSource(in), incomingTarget, in.copy());
edgesToRemove.add(in);
}
}

View File

@ -47,6 +47,7 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Ensures;
import org.jgrapht.EdgeFactory;
import java.util.Arrays;
import java.util.HashMap;
@ -58,12 +59,22 @@ import java.util.Map;
* User: rpoplin
* Date: 2/6/13
*/
public final class DeBruijnGraph extends BaseGraph<DeBruijnVertex> {
public final class DeBruijnGraph extends BaseGraph<DeBruijnVertex, BaseEdge> {
/**
* Edge factory that creates non-reference multiplicity 1 edges
*/
private static class MyEdgeFactory implements EdgeFactory<DeBruijnVertex, BaseEdge> {
@Override
public BaseEdge createEdge(DeBruijnVertex sourceVertex, DeBruijnVertex targetVertex) {
return new BaseEdge(false, 1);
}
}
/**
* Create an empty DeBruijnGraph with default kmer size
*/
public DeBruijnGraph() {
super();
this(11);
}
/**
@ -71,7 +82,7 @@ public final class DeBruijnGraph extends BaseGraph<DeBruijnVertex> {
* @param kmerSize kmer size, must be >= 1
*/
public DeBruijnGraph(int kmerSize) {
super(kmerSize);
super(kmerSize, new MyEdgeFactory());
}
/**

View File

@ -54,7 +54,7 @@ import com.google.java.contract.Ensures;
* User: ebanks, mdepristo
* Date: Mar 23, 2011
*/
public final class DeBruijnVertex extends BaseVertex {
public class DeBruijnVertex extends BaseVertex {
private final static byte[][] sufficesAsByteArray = new byte[256][];
static {
for ( int i = 0; i < sufficesAsByteArray.length; i++ )

View File

@ -48,6 +48,7 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import org.broadinstitute.sting.utils.collections.PrimitivePair;
import java.util.ArrayList;
import java.util.Collection;
@ -60,7 +61,7 @@ import java.util.List;
* Date: 3/25/13
* Time: 9:42 PM
*/
final class GraphUtils {
final public class GraphUtils {
private GraphUtils() {}
/**
@ -135,4 +136,57 @@ final class GraphUtils {
return min;
}
/**
* Find the ending position of the longest uniquely matching
* run of bases of kmer in seq.
*
* for example, if seq = ACGT and kmer is NAC, this function returns 1,2 as we have the following
* match:
*
* 0123
* .ACGT
* NAC..
*
* @param seq a non-null sequence of bytes
* @param kmer a non-null kmer
* @return the ending position and length where kmer matches uniquely in sequence, or null if no
* unique longest match can be found
*/
public static PrimitivePair.Int findLongestUniqueSuffixMatch(final byte[] seq, final byte[] kmer) {
int longestPos = -1;
int length = 0;
boolean foundDup = false;
for ( int i = 0; i < seq.length; i++ ) {
final int matchSize = longestSuffixMatch(seq, kmer, i);
if ( matchSize > length ) {
longestPos = i;
length = matchSize;
foundDup = false;
} else if ( matchSize == length ) {
foundDup = true;
}
}
return foundDup ? null : new PrimitivePair.Int(longestPos, length);
}
/**
* calculates the longest suffix match between a sequence and a smaller kmer
*
* @param seq the (reference) sequence
* @param kmer the smaller kmer sequence
* @param seqStart the index (inclusive) on seq to start looking backwards from
* @return the longest matching suffix
*/
public static int longestSuffixMatch(final byte[] seq, final byte[] kmer, final int seqStart) {
for ( int len = 1; len <= kmer.length; len++ ) {
final int seqI = seqStart - len + 1;
final int kmerI = kmer.length - len;
if ( seqI < 0 || seq[seqI] != kmer[kmerI] ) {
return len - 1;
}
}
return kmer.length;
}
}

View File

@ -59,7 +59,7 @@ import java.util.*;
* User: ebanks, rpoplin, mdepristo
* Date: Mar 23, 2011
*/
public class KBestPaths<T extends BaseVertex> {
public class KBestPaths<T extends BaseVertex, E extends BaseEdge> {
private final boolean allowCycles;
/**
@ -93,7 +93,7 @@ public class KBestPaths<T extends BaseVertex> {
/**
* @see #getKBestPaths(BaseGraph, int) retriving the best 1000 paths
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T, E> graph ) {
return getKBestPaths(graph, 1000);
}
@ -101,28 +101,28 @@ public class KBestPaths<T extends BaseVertex> {
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) retriving the first 1000 paths
* starting from all source vertices and ending with all sink vertices
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T,E> graph, final int k ) {
return getKBestPaths(graph, k, graph.getSources(), graph.getSinks());
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with k=1000
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final Set<T> sources, final Set<T> sinks ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T,E> graph, final Set<T> sources, final Set<T> sinks ) {
return getKBestPaths(graph, 1000, sources, sinks);
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with k=1000
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final T source, final T sink ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T,E> graph, final T source, final T sink ) {
return getKBestPaths(graph, 1000, source, sink);
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with singleton source and sink sets
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k, final T source, final T sink ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T,E> graph, final int k, final T source, final T sink ) {
return getKBestPaths(graph, k, Collections.singleton(source), Collections.singleton(sink));
}
@ -136,20 +136,20 @@ public class KBestPaths<T extends BaseVertex> {
* @return a list with at most k top-scoring paths from the graph
*/
@Ensures({"result != null", "result.size() <= k"})
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k, final Set<T> sources, final Set<T> sinks ) {
public List<Path<T,E>> getKBestPaths( final BaseGraph<T,E> graph, final int k, final Set<T> sources, final Set<T> sinks ) {
if( graph == null ) { throw new IllegalArgumentException("Attempting to traverse a null graph."); }
// a min max queue that will collect the best k paths
final MinMaxPriorityQueue<Path<T>> bestPaths = MinMaxPriorityQueue.orderedBy(new PathComparatorTotalScore()).maximumSize(k).create();
final MinMaxPriorityQueue<Path<T,E>> bestPaths = MinMaxPriorityQueue.orderedBy(new PathComparatorTotalScore()).maximumSize(k).create();
// run a DFS for best paths
for ( final T source : sources ) {
final Path<T> startingPath = new Path<T>(source, graph);
final Path<T,E> startingPath = new Path<T,E>(source, graph);
findBestPaths(startingPath, sinks, bestPaths, new MyInt());
}
// the MinMaxPriorityQueue iterator returns items in an arbitrary order, so we need to sort the final result
final List<Path<T>> toReturn = new ArrayList<Path<T>>(bestPaths);
final List<Path<T,E>> toReturn = new ArrayList<Path<T,E>>(bestPaths);
Collections.sort(toReturn, new PathComparatorTotalScore());
return toReturn;
}
@ -161,21 +161,21 @@ public class KBestPaths<T extends BaseVertex> {
* @param bestPaths a path to collect completed paths.
* @param n used to limit the search by tracking the number of vertices visited across all paths
*/
private void findBestPaths( final Path<T> path, final Set<T> sinks, final Collection<Path<T>> bestPaths, final MyInt n ) {
private void findBestPaths( final Path<T,E> path, final Set<T> sinks, final Collection<Path<T,E>> bestPaths, final MyInt n ) {
if ( sinks.contains(path.getLastVertex())) {
bestPaths.add(path);
} else if( n.val > 10000 ) {
// do nothing, just return, as we've done too much work already
} else {
// recursively run DFS
final ArrayList<BaseEdge> edgeArrayList = new ArrayList<BaseEdge>(path.getOutgoingEdgesOfLastVertex());
final ArrayList<E> edgeArrayList = new ArrayList<E>(path.getOutgoingEdgesOfLastVertex());
Collections.sort(edgeArrayList, new BaseEdge.EdgeWeightComparator());
for ( final BaseEdge edge : edgeArrayList ) {
for ( final E edge : edgeArrayList ) {
final T target = path.getGraph().getEdgeTarget(edge);
// make sure the edge is not already in the path
final boolean alreadyVisited = allowCycles ? path.containsEdge(edge) : path.containsVertex(target);
if ( ! alreadyVisited ) {
final Path<T> newPath = new Path<T>(path, edge);
final Path<T,E> newPath = new Path<T,E>(path, edge);
n.val++;
findBestPaths(newPath, sinks, bestPaths, n);
}

View File

@ -0,0 +1,174 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import java.util.*;
/**
* Prune all chains from this graph where all edges in the path have multiplicity <= pruneFactor
*
* Unlike pruneGraph, this function will remove only linear chains in the graph where all edges have weight <= pruneFactor.
*
* For A -[1]> B -[1]> C -[1]> D would be removed with pruneFactor 1
* but A -[1]> B -[2]> C -[1]> D would not be because the linear chain includes an edge with weight >= 2
*
* User: depristo
* Date: 5/2/13
* Time: 10:38 AM
*/
public class LowWeightChainPruner<V extends BaseVertex, E extends BaseEdge> {
    // chains are kept only if they contain a reference edge or an edge whose
    // pruning multiplicity is strictly greater than this factor
    private final int pruneFactor;

    /**
     * Create a new pruner
     *
     * @param pruneFactor linear chains whose edges all have multiplicity <= pruneFactor
     *                    (and contain no reference edges) will be removed; must be >= 0,
     *                    and a value of 0 disables pruning entirely
     */
    public LowWeightChainPruner(int pruneFactor) {
        if ( pruneFactor < 0 ) throw new IllegalArgumentException("pruneFactor must be >= 0 but got " + pruneFactor);
        this.pruneFactor = pruneFactor;
    }

    /**
     * Prune graph in place, removing every linear chain in which all edges have
     * multiplicity <= pruneFactor and none is a reference edge, then dropping any
     * vertices left without edges.
     *
     * @param graph the graph to prune; cannot be null
     */
    public void pruneLowWeightChains(final BaseGraph<V,E> graph) {
        if ( graph == null ) throw new IllegalArgumentException("Graph cannot be null");

        if ( pruneFactor > 0 ) {
            final Set<E> edgesToKeep = new LinkedHashSet<>();
            for ( final Path<V,E> linearChain : getLinearChains(graph) ) {
                if( mustBeKept(linearChain, pruneFactor) ) {
                    // we must keep edges in any path that contains a reference edge or an edge with weight > pruneFactor
                    edgesToKeep.addAll(linearChain.getEdges());
                }
            }

            // we want to remove all edges not in the keep set
            final Set<E> edgesToRemove = new HashSet<>(graph.edgeSet());
            edgesToRemove.removeAll(edgesToKeep);
            graph.removeAllEdges(edgesToRemove);
            graph.removeSingletonOrphanVertices();
        }
    }

    /**
     * Traverse the edges in the path and determine if any are either ref edges or have weight above
     * the pruning factor and should therefore not be pruned away.
     *
     * NOTE(review): the comparison is strictly greater-than, matching the documented contract
     * that chains whose edges all have multiplicity <= pruneFactor are removed.  The previous
     * {@code >=} test contradicted the class-level examples and kept weight-1 chains at
     * pruneFactor 1, making the default pruning setting a near no-op.
     *
     * @param path the path in question
     * @param pruneFactor the integer pruning factor
     * @return true if any edge in the path must be kept
     */
    private boolean mustBeKept(final Path<V, E> path, final int pruneFactor) {
        for ( final E edge : path.getEdges() ) {
            if ( edge.getPruningMultiplicity() > pruneFactor || edge.isRef() )
                return true;
        }
        return false;
    }

    /**
     * Get all of the linear chains in graph
     *
     * A linear chain is a series of vertices that starts from either a source or a branching
     * vertex (in- or out-degree > 1) and extends through all vertices accessible via an
     * outgoing edge that have in-degree of 1 and out-degree of 0 or 1.
     *
     * @param graph the graph
     * @return a non-null collection of paths in graph
     */
    protected final Collection<Path<V,E>> getLinearChains(final BaseGraph<V,E> graph) {
        final Set<V> chainStarts = new LinkedHashSet<>();
        for ( final V v : graph.vertexSet() ) {
            // chain starts are all branching vertices (in- or out-degree > 1) plus all
            // non-isolated source vertices
            final int outDegree = graph.outDegreeOf(v);
            final int inDegree = graph.inDegreeOf(v);
            if ( outDegree > 1 || inDegree > 1 || (inDegree == 0 && outDegree > 0)) // don't add isolated vertices
                chainStarts.add(v);
        }

        // a Set is required above because the finding algorithm can flag the same start twice
        final List<Path<V, E>> linearChains = new LinkedList<>();
        for ( final V chainStart : chainStarts ) {
            for ( final E outEdge : graph.outgoingEdgesOf(chainStart) ) {
                // these chains are composed of the starts + their next vertices
                linearChains.add(extendLinearChain(new Path<>(new Path<>(chainStart, graph), outEdge)));
            }
        }

        return linearChains;
    }

    /**
     * Extend path while the last vertex has in and out degrees of 1 or 0
     * @param path the path to extend
     * @return a fully extended linear path
     */
    protected final Path<V,E> extendLinearChain(final Path<V, E> path) {
        final V last = path.getLastVertex();
        final Set<E> outEdges = path.getGraph().outgoingEdgesOf(last);
        final int outDegree = outEdges.size();
        final int inDegree = path.getGraph().inDegreeOf(last);

        if ( outDegree != 1 || inDegree > 1 ) {
            // the last vertex branches (or dead-ends), so the linear chain stops here
            return path;
        } else {
            final V next = path.getGraph().getEdgeTarget(outEdges.iterator().next());
            if ( path.containsVertex(next) ) {
                // we are done if the path contains a cycle
                return path;
            } else {
                // last has out-degree == 1 and next is unvisited, so keep extending the chain
                return extendLinearChain(new Path<>(path, outEdges.iterator().next()));
            }
        }
    }
}

View File

@ -0,0 +1,128 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import java.util.PriorityQueue;
/**
* edge class for connecting nodes in the graph that tracks some per-sample information
*
* This class extends BaseEdge with the additional functionality of tracking the maximum
* multiplicity seen within any single sample. The workflow for using this class is:
*
 * MultiSampleEdge e = new MultiSampleEdge(ref, 1, 1)   // ref flag, multiplicity 1, capacity 1
* e.incMultiplicity(1) // total is 2, per sample is 2, max per sample is 1
* e.getPruningMultiplicity() // = 1
* e.flushSingleSampleMultiplicity() // total is 2, per sample is 0, max per sample is 2
* e.getPruningMultiplicity() // = 2
* e.incMultiplicity(3) // total is 5, per sample is 3, max per sample is 2
* e.getPruningMultiplicity() // = 2
* e.flushSingleSampleMultiplicity() // total is 5, per sample is 0, max per sample is 3
* e.getPruningMultiplicity() // = 3
*/
public class MultiSampleEdge extends BaseEdge {
    // running multiplicity for the sample currently being processed
    private int currentSingleSampleMultiplicity;

    // how many of the largest per-sample multiplicities are retained for pruning
    private final int singleSampleCapacity;

    // min-heap of retained per-sample multiplicities; once it holds
    // singleSampleCapacity entries, its head is the singleSampleCapacity-th
    // largest per-sample multiplicity observed so far
    private final PriorityQueue<Integer> singleSampleMultiplicities;

    /**
     * Create a new MultiSampleEdge with weight multiplicity and, if isRef == true, indicates a path through the reference
     *
     * @param isRef                indicates whether this edge is a path through the reference
     * @param multiplicity         the number of observations of this edge in the first sample
     * @param singleSampleCapacity how many of the largest per-sample multiplicities to track; must be > 0
     */
    public MultiSampleEdge(final boolean isRef, final int multiplicity, final int singleSampleCapacity) {
        super(isRef, multiplicity);
        if( singleSampleCapacity <= 0 ) { throw new IllegalArgumentException("singleSampleCapacity must be > 0 but found: " + singleSampleCapacity); }

        this.singleSampleCapacity = singleSampleCapacity;
        this.singleSampleMultiplicities = new PriorityQueue<>(singleSampleCapacity);
        this.singleSampleMultiplicities.add(multiplicity);
        this.currentSingleSampleMultiplicity = multiplicity;
    }

    @Override
    public MultiSampleEdge copy() {
        // only the total multiplicity is carried over into the copy
        return new MultiSampleEdge(isRef(), getMultiplicity(), singleSampleCapacity); // TODO -- should I copy values for other features?
    }

    /**
     * Push the multiplicity accumulated for the current sample into the retained set and
     * reset the per-sample counter to 0 so the next sample starts fresh.
     */
    public void flushSingleSampleMultiplicity() {
        singleSampleMultiplicities.add(currentSingleSampleMultiplicity);
        final int retained = singleSampleMultiplicities.size();
        if( retained > singleSampleCapacity + 1 ) {
            throw new IllegalStateException("Somehow the per sample multiplicity list has grown too big: " + singleSampleMultiplicities);
        }
        if( retained == singleSampleCapacity + 1 ) {
            // evict the smallest retained value so only the top singleSampleCapacity remain
            singleSampleMultiplicities.poll();
        }
        currentSingleSampleMultiplicity = 0;
    }

    @Override
    public void incMultiplicity(final int incr) {
        super.incMultiplicity(incr);
        currentSingleSampleMultiplicity += incr;
    }

    @Override
    public int getPruningMultiplicity() {
        // head of the min-heap: the smallest of the retained (largest) per-sample multiplicities
        return singleSampleMultiplicities.peek();
    }

    @Override
    public String getDotLabel() {
        return super.getDotLabel() + "/" + getPruningMultiplicity();
    }

    /** only provided for testing purposes */
    protected int getCurrentSingleSampleMultiplicity() {
        return currentSingleSampleMultiplicity;
    }
}

View File

@ -47,13 +47,12 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.apache.commons.lang.ArrayUtils;
import org.broadinstitute.sting.utils.smithwaterman.Parameters;
import org.broadinstitute.sting.utils.smithwaterman.SWPairwiseAlignment;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.smithwaterman.*;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import java.util.*;
@ -68,40 +67,39 @@ import java.util.*;
* Time: 2:34 PM
*
*/
public class Path<T extends BaseVertex> {
private final static int MAX_CIGAR_ELEMENTS_BEFORE_FAILING_SW = 20;
public class Path<T extends BaseVertex, E extends BaseEdge> {
private final static String SW_PAD = "NNNNNNNNNN";
private final static Logger logger = Logger.getLogger(Path.class);
// the last vertex seen in the path
private final T lastVertex;
// the list of edges comprising the path
private Set<BaseEdge> edgesAsSet = null;
private final LinkedList<BaseEdge> edgesInOrder;
private Set<E> edgesAsSet = null;
private final LinkedList<E> edgesInOrder;
// the scores for the path
private final int totalScore;
// the graph from which this path originated
private final BaseGraph<T> graph;
private final BaseGraph<T, E> graph;
// used in the bubble state machine to apply Smith-Waterman to the bubble sequence
// these values were chosen via optimization against the NA12878 knowledge base
public static final Parameters NEW_SW_PARAMETERS = new Parameters(20.0, -15.0, -26.0, -1.1);
private static final byte[] STARTING_SW_ANCHOR_BYTES = "XXXXXXXXX".getBytes();
/**
* Create a new Path containing no edges and starting at initialVertex
* @param initialVertex the starting vertex of the path
* @param graph the graph this path with follow through
* @param graph the graph this path will follow through
*/
public Path(final T initialVertex, final BaseGraph<T> graph) {
public Path(final T initialVertex, final BaseGraph<T, E> graph) {
if ( initialVertex == null ) throw new IllegalArgumentException("initialVertex cannot be null");
if ( graph == null ) throw new IllegalArgumentException("graph cannot be null");
if ( ! graph.containsVertex(initialVertex) ) throw new IllegalArgumentException("Vertex " + initialVertex + " must be part of graph " + graph);
lastVertex = initialVertex;
edgesInOrder = new LinkedList<BaseEdge>();
edgesInOrder = new LinkedList<E>();
totalScore = 0;
this.graph = graph;
}
@ -109,10 +107,10 @@ public class Path<T extends BaseVertex> {
/**
* Convenience constructor for testing that creates a path through vertices in graph
*/
protected static <T extends BaseVertex> Path<T> makePath(final List<T> vertices, final BaseGraph<T> graph) {
Path<T> path = new Path<T>(vertices.get(0), graph);
protected static <T extends BaseVertex, E extends BaseEdge> Path<T,E> makePath(final List<T> vertices, final BaseGraph<T, E> graph) {
Path<T,E> path = new Path<T,E>(vertices.get(0), graph);
for ( int i = 1; i < vertices.size(); i++ )
path = new Path<T>(path, graph.getEdge(path.lastVertex, vertices.get(i)));
path = new Path<T,E>(path, graph.getEdge(path.lastVertex, vertices.get(i)));
return path;
}
@ -122,7 +120,7 @@ public class Path<T extends BaseVertex> {
* @param p the path to extend
* @param edge the edge to extend path by
*/
public Path(final Path<T> p, final BaseEdge edge) {
public Path(final Path<T,E> p, final E edge) {
if ( p == null ) throw new IllegalArgumentException("Path cannot be null");
if ( edge == null ) throw new IllegalArgumentException("Edge cannot be null");
if ( ! p.graph.containsEdge(edge) ) throw new IllegalArgumentException("Graph must contain edge " + edge + " but it doesn't");
@ -130,7 +128,7 @@ public class Path<T extends BaseVertex> {
graph = p.graph;
lastVertex = p.graph.getEdgeTarget(edge);
edgesInOrder = new LinkedList<BaseEdge>(p.getEdges());
edgesInOrder = new LinkedList<E>(p.getEdges());
edgesInOrder.add(edge);
totalScore = p.totalScore + edge.getMultiplicity();
}
@ -139,7 +137,7 @@ public class Path<T extends BaseVertex> {
* Get the collection of edges leaving the last vertex of this path
* @return a non-null collection
*/
public Collection<BaseEdge> getOutgoingEdgesOfLastVertex() {
public Collection<E> getOutgoingEdgesOfLastVertex() {
return getGraph().outgoingEdgesOf(getLastVertex());
}
@ -148,12 +146,12 @@ public class Path<T extends BaseVertex> {
* @param edge the given edge to test
* @return true if the edge is found in this path
*/
public boolean containsEdge( final BaseEdge edge ) {
public boolean containsEdge( final E edge ) {
if( edge == null ) { throw new IllegalArgumentException("Attempting to test null edge."); }
if ( edgesInOrder.isEmpty() ) return false;
// initialize contains cache if necessary
if ( edgesAsSet == null ) edgesAsSet = new HashSet<BaseEdge>(edgesInOrder);
if ( edgesAsSet == null ) edgesAsSet = new HashSet<E>(edgesInOrder);
return edgesAsSet.contains(edge);
}
@ -175,7 +173,7 @@ public class Path<T extends BaseVertex> {
* @param path the other path we might be the same as
* @return true if this and path are the same
*/
protected boolean pathsAreTheSame(Path<T> path) {
protected boolean pathsAreTheSame(Path<T,E> path) {
return totalScore == path.totalScore && edgesInOrder.equals(path.edgesInOrder);
}
@ -199,7 +197,7 @@ public class Path<T extends BaseVertex> {
* @return a non-null graph
*/
@Ensures("result != null")
public BaseGraph<T> getGraph() {
public BaseGraph<T, E> getGraph() {
return graph;
}
@ -208,7 +206,7 @@ public class Path<T extends BaseVertex> {
* @return a non-null list of edges
*/
@Ensures("result != null")
public List<BaseEdge> getEdges() { return edgesInOrder; }
public List<E> getEdges() { return edgesInOrder; }
/**
* Get the list of vertices in this path in order defined by the edges of the path
@ -221,7 +219,7 @@ public class Path<T extends BaseVertex> {
else {
final LinkedList<T> vertices = new LinkedList<T>();
boolean first = true;
for ( final BaseEdge e : getEdges() ) {
for ( final E e : getEdges() ) {
if ( first ) {
vertices.add(graph.getEdgeSource(e));
first = false;
@ -246,6 +244,14 @@ public class Path<T extends BaseVertex> {
@Ensures("result != null")
public T getLastVertex() { return lastVertex; }
/**
 * Get the first vertex in this path
 *
 * Does NOT modify the path: the previous implementation called
 * {@code edgesInOrder.pollFirst()}, which REMOVED the first edge from the
 * path as a side effect of a read-only accessor (and threw NPE on a path
 * with no edges).
 *
 * @return a non-null vertex; for an edge-less path this is the starting
 *         vertex itself (which is also the last vertex)
 */
public T getFirstVertex() {
    return edgesInOrder.isEmpty() ? lastVertex : getGraph().getEdgeSource(edgesInOrder.getFirst());
}
/**
* The base sequence for this path. Pull the full sequence for source nodes and then the suffix for all subsequent nodes
* @return non-null sequence of bases corresponding to this path
@ -255,174 +261,114 @@ public class Path<T extends BaseVertex> {
if( getEdges().isEmpty() ) { return graph.getAdditionalSequence(lastVertex); }
byte[] bases = graph.getAdditionalSequence(graph.getEdgeSource(edgesInOrder.getFirst()));
for( final BaseEdge e : edgesInOrder ) {
for( final E e : edgesInOrder ) {
bases = ArrayUtils.addAll(bases, graph.getAdditionalSequence(graph.getEdgeTarget(e)));
}
return bases;
}
/**
* Calculate the cigar string for this path using a bubble traversal of the assembly graph and running a Smith-Waterman alignment on each bubble
* @return non-null Cigar string with reference length equal to the refHaplotype's reference length
* Calculate the cigar elements for this path against the reference sequence
*
* @param refSeq the reference sequence that all of the bases in this path should align to
* @return a Cigar mapping this path to refSeq, or null if no reasonable alignment could be found
*/
@Ensures("result != null")
public Cigar calculateCigar() {
final Cigar cigar = new Cigar();
// special case for paths that start on reference but not at the reference source node
if( edgesInOrder.getFirst().isRef() && !graph.isRefSource(edgesInOrder.getFirst()) ) {
for( final CigarElement ce : calculateCigarForCompleteBubble(null, null, graph.getEdgeSource(edgesInOrder.getFirst())).getCigarElements() ) {
cigar.add(ce);
}
public Cigar calculateCigar(final byte[] refSeq) {
if ( getBases().length == 0 ) {
// horrible edge case from the unit tests, where this path has no bases
return new Cigar(Arrays.asList(new CigarElement(refSeq.length, CigarOperator.D)));
}
// reset the bubble state machine
final BubbleStateMachine<T> bsm = new BubbleStateMachine<T>(cigar);
final byte[] bases = getBases();
final Cigar nonStandard;
for( final BaseEdge e : getEdges() ) {
if ( e.hasSameSourceAndTarget(graph, edgesInOrder.getFirst()) ) {
advanceBubbleStateMachine( bsm, graph.getEdgeSource(e), null );
}
advanceBubbleStateMachine( bsm, graph.getEdgeTarget(e), e );
final String paddedRef = SW_PAD + new String(refSeq) + SW_PAD;
final String paddedPath = SW_PAD + new String(bases) + SW_PAD;
final SmithWaterman alignment = new SWPairwiseAlignment( paddedRef.getBytes(), paddedPath.getBytes(), NEW_SW_PARAMETERS );
if ( isSWFailure(alignment) )
return null;
// cut off the padding bases
final int baseStart = SW_PAD.length();
final int baseEnd = paddedPath.length() - SW_PAD.length() - 1; // -1 because it's inclusive
nonStandard = AlignmentUtils.trimCigarByBases(alignment.getCigar(), baseStart, baseEnd);
if ( nonStandard.getReferenceLength() != refSeq.length ) {
nonStandard.add(new CigarElement(refSeq.length - nonStandard.getReferenceLength(), CigarOperator.D));
}
// special case for paths that don't end on reference
if( bsm.inBubble ) {
for( final CigarElement ce : calculateCigarForCompleteBubble(bsm.bubbleBytes, bsm.lastSeenReferenceNode, null).getCigarElements() ) {
bsm.cigar.add(ce);
}
} else if( edgesInOrder.getLast().isRef() && !graph.isRefSink(edgesInOrder.getLast()) ) { // special case for paths that end of the reference but haven't completed the entire reference circuit
for( final CigarElement ce : calculateCigarForCompleteBubble(bsm.bubbleBytes, graph.getEdgeTarget(edgesInOrder.getLast()), null).getCigarElements() ) {
bsm.cigar.add(ce);
}
}
return AlignmentUtils.consolidateCigar(bsm.cigar);
// finally, return the cigar with all indels left aligned
return leftAlignCigarSequentially(nonStandard, refSeq, getBases(), 0, 0);
}
/**
* Advance the bubble state machine by incorporating the next node in the path.
* @param bsm the current bubble state machine
* @param node the node to be incorporated
* @param e the edge which generated this node in the path
* Make sure that the SW didn't fail in some terrible way, and throw exception if it did
*/
@Requires({"bsm != null", "graph != null", "node != null"})
private void advanceBubbleStateMachine( final BubbleStateMachine<T> bsm, final T node, final BaseEdge e ) {
if( graph.isReferenceNode( node ) ) {
if( !bsm.inBubble ) { // just add the ref bases as M's in the Cigar string, and don't do anything else
if( e !=null && !e.isRef() ) {
if( graph.referencePathExists( graph.getEdgeSource(e), node) ) {
for( final CigarElement ce : calculateCigarForCompleteBubble(null, graph.getEdgeSource(e), node).getCigarElements() ) {
bsm.cigar.add(ce);
}
bsm.cigar.add( new CigarElement( graph.getAdditionalSequence(node).length, CigarOperator.M) );
} else if ( graph.getEdgeSource(e).equals(graph.getEdgeTarget(e)) ) { // alt edge at ref node points to itself
bsm.cigar.add( new CigarElement( graph.getAdditionalSequence(node).length, CigarOperator.I) );
} else {
bsm.inBubble = true;
bsm.bubbleBytes = null;
bsm.lastSeenReferenceNode = graph.getEdgeSource(e);
bsm.bubbleBytes = ArrayUtils.addAll( bsm.bubbleBytes, graph.getAdditionalSequence(node) );
}
} else {
bsm.cigar.add( new CigarElement( graph.getAdditionalSequence(node).length, CigarOperator.M) );
}
} else if( bsm.lastSeenReferenceNode != null && !graph.referencePathExists( bsm.lastSeenReferenceNode, node ) ) { // add bases to the bubble string until we get back to the reference path
bsm.bubbleBytes = ArrayUtils.addAll( bsm.bubbleBytes, graph.getAdditionalSequence(node) );
} else { // close the bubble and use a local SW to determine the Cigar string
for( final CigarElement ce : calculateCigarForCompleteBubble(bsm.bubbleBytes, bsm.lastSeenReferenceNode, node).getCigarElements() ) {
bsm.cigar.add(ce);
}
bsm.inBubble = false;
bsm.bubbleBytes = null;
bsm.lastSeenReferenceNode = null;
bsm.cigar.add( new CigarElement( graph.getAdditionalSequence(node).length, CigarOperator.M) );
}
} else { // non-ref vertex
if( bsm.inBubble ) { // just keep accumulating until we get back to the reference path
bsm.bubbleBytes = ArrayUtils.addAll( bsm.bubbleBytes, graph.getAdditionalSequence(node) );
} else { // open up a bubble
bsm.inBubble = true;
bsm.bubbleBytes = null;
bsm.lastSeenReferenceNode = (e != null ? graph.getEdgeSource(e) : null );
bsm.bubbleBytes = ArrayUtils.addAll( bsm.bubbleBytes, graph.getAdditionalSequence(node) );
}
/**
 * Decide whether the Smith-Waterman alignment failed in a way that makes its cigar unusable.
 *
 * @param alignment the completed pairwise alignment to inspect
 * @return true if the alignment is unusable (did not start at the first base, or contains soft clips)
 */
private boolean isSWFailure(final SmithWaterman alignment) {
    // given the padding, a sane alignment must begin at offset 0
    if ( alignment.getAlignmentStart2wrt1() > 0 ) {
        return true;
    }

    // any S operator would be very bad downstream; soft clips at the end of the
    // alignment are really insertions, so treat their presence as a failure
    for ( final CigarElement ce : alignment.getCigar().getCigarElements() ) {
        if ( ce.getOperator() == CigarOperator.S ) {
            return true;
        }
    }

    return false;
}
/**
* Now that we have a completed bubble run a Smith-Waterman alignment to determine the cigar string for this bubble
* @param bubbleBytes the bytes that comprise the alternate allele path in this bubble
* @param fromVertex the vertex that marks the beginning of the reference path in this bubble (null indicates ref source vertex)
* @param toVertex the vertex that marks the end of the reference path in this bubble (null indicates ref sink vertex)
* @return the cigar string generated by running a SW alignment between the reference and alternate paths in this bubble
* Left align the given cigar sequentially. This is needed because AlignmentUtils doesn't accept cigars with more than one indel in them.
* This is a target of future work to incorporate and generalize into AlignmentUtils for use by others.
* @param cigar the cigar to left align
* @param refSeq the reference byte array
* @param readSeq the read byte array
* @param refIndex 0-based alignment start position on ref
* @param readIndex 0-based alignment start position on read
* @return the left-aligned cigar
*/
@Requires({"graph != null"})
@Ensures({"result != null"})
private Cigar calculateCigarForCompleteBubble( final byte[] bubbleBytes, final T fromVertex, final T toVertex ) {
final byte[] refBytes = graph.getReferenceBytes(fromVertex == null ? graph.getReferenceSourceVertex() : fromVertex, toVertex == null ? graph.getReferenceSinkVertex() : toVertex, fromVertex == null, toVertex == null);
final Cigar returnCigar = new Cigar();
// add padding to anchor ref/alt bases in the SW matrix
byte[] padding = STARTING_SW_ANCHOR_BYTES;
boolean goodAlignment = false;
SWPairwiseAlignment swConsensus = null;
while( !goodAlignment && padding.length < 1000 ) {
padding = ArrayUtils.addAll(padding, padding); // double the size of the padding each time
final byte[] reference = ArrayUtils.addAll( ArrayUtils.addAll(padding, refBytes), padding );
final byte[] alternate = ArrayUtils.addAll( ArrayUtils.addAll(padding, bubbleBytes), padding );
swConsensus = new SWPairwiseAlignment( reference, alternate, NEW_SW_PARAMETERS );
if( swConsensus.getAlignmentStart2wrt1() == 0 && !swConsensus.getCigar().toString().contains("S") && swConsensus.getCigar().getReferenceLength() == reference.length ) {
goodAlignment = true;
@Ensures({"cigar != null", "refSeq != null", "readSeq != null", "refIndex >= 0", "readIndex >= 0"})
protected static Cigar leftAlignCigarSequentially(final Cigar cigar, final byte[] refSeq, final byte[] readSeq, int refIndex, int readIndex) {
final Cigar cigarToReturn = new Cigar();
Cigar cigarToAlign = new Cigar();
for (int i = 0; i < cigar.numCigarElements(); i++) {
final CigarElement ce = cigar.getCigarElement(i);
if (ce.getOperator() == CigarOperator.D || ce.getOperator() == CigarOperator.I) {
cigarToAlign.add(ce);
final Cigar leftAligned = AlignmentUtils.leftAlignSingleIndel(cigarToAlign, refSeq, readSeq, refIndex, readIndex, false);
for ( final CigarElement toAdd : leftAligned.getCigarElements() ) { cigarToReturn.add(toAdd); }
refIndex += cigarToAlign.getReferenceLength();
readIndex += cigarToAlign.getReadLength();
cigarToAlign = new Cigar();
} else {
cigarToAlign.add(ce);
}
}
if( !goodAlignment ) {
returnCigar.add(new CigarElement(1, CigarOperator.N));
return returnCigar;
}
final Cigar swCigar = swConsensus.getCigar();
if( swCigar.numCigarElements() > MAX_CIGAR_ELEMENTS_BEFORE_FAILING_SW ) { // this bubble is too divergent from the reference
returnCigar.add(new CigarElement(1, CigarOperator.N));
} else {
for( int iii = 0; iii < swCigar.numCigarElements(); iii++ ) {
// now we need to remove the padding from the cigar string
int length = swCigar.getCigarElement(iii).getLength();
if( iii == 0 ) { length -= padding.length; }
if( iii == swCigar.numCigarElements() - 1 ) { length -= padding.length; }
if( length > 0 ) {
returnCigar.add(new CigarElement(length, swCigar.getCigarElement(iii).getOperator()));
}
}
if( (refBytes == null && returnCigar.getReferenceLength() != 0) || ( refBytes != null && returnCigar.getReferenceLength() != refBytes.length ) ) {
throw new IllegalStateException("SmithWaterman cigar failure: " + (refBytes == null ? "-" : new String(refBytes)) + " against " + new String(bubbleBytes) + " = " + swConsensus.getCigar());
if( !cigarToAlign.isEmpty() ) {
for( final CigarElement toAdd : cigarToAlign.getCigarElements() ) {
cigarToReturn.add(toAdd);
}
}
return returnCigar;
final Cigar result = AlignmentUtils.consolidateCigar(cigarToReturn);
if( result.getReferenceLength() != cigar.getReferenceLength() )
throw new IllegalStateException("leftAlignCigarSequentially failed to produce a valid CIGAR. Reference lengths differ. Initial cigar " + cigar + " left aligned into " + result);
return result;
}
/**
 * Mutable holder for the state of the bubble-traversal state machine.
 *
 * Fields are public by design: the enclosing traversal code (not visible here)
 * presumably reads and writes them directly as it walks the graph.
 */
private static class BubbleStateMachine<T extends BaseVertex> {
    public boolean inBubble;          // true while the traversal is inside a bubble
    public byte[] bubbleBytes;        // bases accumulated for the current bubble, if any
    public T lastSeenReferenceNode;   // most recently visited reference vertex, if any
    public Cigar cigar;               // running cigar, seeded from the constructor argument

    /**
     * Create a fresh state machine positioned outside any bubble.
     * @param initialCigar the cigar to start accumulating into
     */
    public BubbleStateMachine( final Cigar initialCigar ) {
        this.inBubble = false;
        this.bubbleBytes = null;
        this.lastSeenReferenceNode = null;
        this.cigar = initialCigar;
    }
}
/**
* Tests that this and other have the same score and vertices in the same order with the same seq
* @param other the other path to consider. Cannot be null
* @return true if this and path are equal, false otherwise
*/
public boolean equalScoreAndSequence(final Path<T> other) {
public boolean equalScoreAndSequence(final Path<T,E> other) {
if ( other == null ) throw new IllegalArgumentException("other cannot be null");
return getScore() == other.getScore() && equalSequence(other);
}
@ -432,7 +378,7 @@ public class Path<T extends BaseVertex> {
* @param other the other path to consider. Cannot be null
* @return true if this and path are equal, false otherwise
*/
public boolean equalSequence(final Path<T> other) {
public boolean equalSequence(final Path<T,E> other) {
final List<T> mine = getVertices();
final List<T> yours = other.getVertices();
if ( mine.size() == yours.size() ) { // hehehe

View File

@ -48,6 +48,7 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import org.jgrapht.EdgeFactory;
import java.io.File;
import java.util.HashSet;
@ -61,7 +62,17 @@ import java.util.Set;
* @author: depristo
* @since 03/2013
*/
public final class SeqGraph extends BaseGraph<SeqVertex> {
public final class SeqGraph extends BaseGraph<SeqVertex, BaseEdge> {
/**
 * Edge factory used by the graph library to create edges on demand.
 *
 * Every edge produced here is a non-reference edge with an initial
 * multiplicity of 1.
 */
private static class MyEdgeFactory implements EdgeFactory<SeqVertex, BaseEdge> {
    @Override
    public BaseEdge createEdge(final SeqVertex sourceVertex, final SeqVertex targetVertex) {
        final boolean isRefEdge = false;  // factory-created edges are never reference edges
        final int multiplicity = 1;       // and always start with multiplicity 1
        return new BaseEdge(isRefEdge, multiplicity);
    }
}
private final static boolean PRINT_SIMPLIFY_GRAPHS = false;
/**
@ -82,7 +93,7 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
* Construct an empty SeqGraph
*/
public SeqGraph() {
super();
this(11);
}
/**
@ -94,7 +105,7 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
* @param kmer kmer
*/
public SeqGraph(final int kmer) {
super(kmer);
super(kmer, new MyEdgeFactory());
}
/**
@ -144,21 +155,29 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
//logger.info("simplifyGraph iteration " + i);
// iterate until we haven't done anything useful
boolean didSomeWork = false;
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + iteration + ".1.dot"), 0);
printGraphSimplification(new File("simplifyGraph." + iteration + ".1.dot"));
didSomeWork |= new MergeDiamonds().transformUntilComplete();
didSomeWork |= new MergeTails().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + iteration + ".2.diamonds_and_tails.dot"), 0);
printGraphSimplification(new File("simplifyGraph." + iteration + ".2.diamonds_and_tails.dot"));
didSomeWork |= new SplitCommonSuffices().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + iteration + ".3.split_suffix.dot"), 0);
printGraphSimplification(new File("simplifyGraph." + iteration + ".3.split_suffix.dot"));
didSomeWork |= new MergeCommonSuffices().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + iteration + ".4.merge_suffix.dot"), 0);
printGraphSimplification(new File("simplifyGraph." + iteration + ".4.merge_suffix.dot"));
didSomeWork |= new MergeHeadlessIncomingSources().transformUntilComplete();
didSomeWork |= zipLinearChains();
return didSomeWork;
}
/**
 * Write a DOT snapshot of the current graph-simplification step, but only when
 * the PRINT_SIMPLIFY_GRAPHS debugging flag is enabled.
 *
 * Only the neighborhood within distance 5 of the reference source vertex is
 * printed, keeping the debugging output to a readable size.
 *
 * @param file the destination for the graph DOT file
 */
private void printGraphSimplification(final File file) {
    if ( !PRINT_SIMPLIFY_GRAPHS ) {
        return;
    }
    subsetToNeighbors(getReferenceSourceVertex(), 5).printGraph(file, 0);
}
/**
* Zip up all of the simple linear chains present in this graph.
*
@ -289,8 +308,8 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
final BaseEdge inc = new BaseEdge(false, sharedWeightAmongEdges); // template to make .add function call easy
// update the incoming and outgoing edges to point to the new vertex
for( final BaseEdge edge : outEdges ) { addEdge(addedVertex, getEdgeTarget(edge), new BaseEdge(edge).add(inc)); }
for( final BaseEdge edge : inEdges ) { addEdge(getEdgeSource(edge), addedVertex, new BaseEdge(edge).add(inc)); }
for( final BaseEdge edge : outEdges ) { addEdge(addedVertex, getEdgeTarget(edge), edge.copy().add(inc)); }
for( final BaseEdge edge : inEdges ) { addEdge(getEdgeSource(edge), addedVertex, edge.copy().add(inc)); }
removeAllVertices(linearChain);
return true;
@ -342,7 +361,7 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
* Merge until the graph has no vertices that are candidates for merging
*/
public boolean transformUntilComplete() {
boolean didAtLeastOneTranform = false;
boolean didAtLeastOneTransform = false;
boolean foundNodesToMerge = true;
while( foundNodesToMerge ) {
foundNodesToMerge = false;
@ -350,13 +369,13 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
for( final SeqVertex v : vertexSet() ) {
foundNodesToMerge = tryToTransform(v);
if ( foundNodesToMerge ) {
didAtLeastOneTranform = true;
didAtLeastOneTransform = true;
break;
}
}
}
return didAtLeastOneTranform;
return didAtLeastOneTransform;
}
/**
@ -505,40 +524,4 @@ public final class SeqGraph extends BaseGraph<SeqVertex> {
}
}
}
/**
* Merge headless configurations:
*
* Performs the transformation:
*
* { x + S_i + y -> Z }
*
* goes to:
*
* { x -> S_i -> y -> Z }
*
* for all nodes that match this configuration.
*
* Differs from the diamond transform in that no top node is required
*/
protected class MergeHeadlessIncomingSources extends VertexBasedTransformer {
    @Override
    boolean tryToTransform(final SeqVertex bottom) {
        // The configuration requires at least two incoming vertices merging into bottom
        final Set<SeqVertex> incoming = incomingVerticesOf(bottom);
        if ( incoming.size() <= 1 )
            return false;

        // Every incoming vertex must itself be headless (a source) and must have
        // bottom as its only outgoing target (it has an edge to bottom, so
        // outDegree <= 1 implies bottom is the unique target)
        for ( final SeqVertex inc : incoming )
            if ( ! isSource(inc) || outDegreeOf(inc) > 1 )
                return false;

        // dry-run short circuit: report that a transform is possible without
        // actually mutating the graph
        if ( dontModifyGraphEvenIfPossible() ) return true;

        // Split out the shared prefix of the incoming sources, if it is long enough;
        // the top vertex is null since the sources are headless — NOTE(review):
        // confirm splitAndUpdate's null-top contract against SharedVertexSequenceSplitter
        final SharedVertexSequenceSplitter splitter = new SharedVertexSequenceSplitter(SeqGraph.this, incoming);

        if (splitter.meetsMinMergableSequenceForPrefix(MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES))
            return splitter.splitAndUpdate(null, bottom);
        else
            return false;
    }
}
}

View File

@ -49,6 +49,7 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Requires;
import org.broadinstitute.sting.utils.Utils;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A graph vertex containing a sequence of bases and a unique ID that
@ -71,8 +72,9 @@ import java.util.Arrays;
* @since 03/2013
*/
public final class SeqVertex extends BaseVertex {
private static int idCounter = 0;
public final int id;
// Note that using an AtomicInteger is critical to allow multi-threaded HaplotypeCaller
private static final AtomicInteger idCounter = new AtomicInteger(0);
private int id = idCounter.getAndIncrement();
/**
* Create a new SeqVertex with sequence and the next available id
@ -80,7 +82,6 @@ public final class SeqVertex extends BaseVertex {
*/
public SeqVertex(final byte[] sequence) {
super(sequence);
this.id = idCounter++;
}
/**
@ -89,7 +90,6 @@ public final class SeqVertex extends BaseVertex {
*/
public SeqVertex(final String sequence) {
super(sequence);
this.id = idCounter++;
}
/**

View File

@ -81,20 +81,20 @@ public class SharedSequenceMerger {
else {
// graph.printGraph(new File("csm." + counter + "." + v.getSequenceString() + "_pre.dot"), 0);
final List<BaseEdge> edgesToRemove = new LinkedList<BaseEdge>();
final List<BaseEdge> edgesToRemove = new LinkedList<>();
final byte[] prevSeq = prevs.iterator().next().getSequence();
final SeqVertex newV = new SeqVertex(ArrayUtils.addAll(prevSeq, v.getSequence()));
graph.addVertex(newV);
for ( final SeqVertex prev : prevs ) {
for ( final BaseEdge prevIn : graph.incomingEdgesOf(prev) ) {
graph.addEdge(graph.getEdgeSource(prevIn), newV, new BaseEdge(prevIn));
graph.addEdge(graph.getEdgeSource(prevIn), newV, prevIn.copy());
edgesToRemove.add(prevIn);
}
}
for ( final BaseEdge e : graph.outgoingEdgesOf(v) ) {
graph.addEdge(newV, graph.getEdgeTarget(e), new BaseEdge(e));
graph.addEdge(newV, graph.getEdgeTarget(e), e.copy());
}
graph.removeAllVertices(prevs);
@ -124,11 +124,17 @@ public class SharedSequenceMerger {
final SeqVertex first = incomingVertices.iterator().next();
for ( final SeqVertex prev : incomingVertices) {
if ( ! prev.seqEquals(first) )
// cannot merge if our sequence isn't the same as the first sequence
return false;
final Collection<SeqVertex> prevOuts = graph.outgoingVerticesOf(prev);
if ( prevOuts.size() != 1 )
// prev -> v must be the only edge from prev
return false;
if ( prevOuts.iterator().next() != v )
// don't allow cycles
return false;
if ( graph.inDegreeOf(prev) == 0 )
// cannot merge when any of the incoming nodes are sources
return false;
}

View File

@ -209,7 +209,7 @@ public class SharedVertexSequenceSplitter {
splitGraph.addEdge(remaining, suffixV, fromMid);
} else {
// prefix + suffix completely explain this node
splitGraph.addOrUpdateEdge(prefixV, suffixV, new BaseEdge(toMid).add(fromMid));
splitGraph.addOrUpdateEdge(prefixV, suffixV, toMid.copy().add(fromMid));
}
}
}
@ -323,7 +323,7 @@ public class SharedVertexSequenceSplitter {
} else {
// schedule edge for removal, and return a freshly allocated one for our graph to use
edgesToRemove.add(e);
return new BaseEdge(e);
return e.copy();
}
}
}

View File

@ -0,0 +1,121 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.DeBruijnVertex;
import org.broadinstitute.sting.utils.Utils;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A DeBruijnVertex that supports multiple copies of the same kmer
*
* This is implemented through the same mechanism as SeqVertex, where each
* created MultiDeBruijnVertex has a unique id assigned upon creation. Two
* MultiDeBruijnVertex are equal iff they have the same ID
*
* User: depristo
* Date: 4/17/13
* Time: 3:20 PM
*/
final class MultiDeBruijnVertex extends DeBruijnVertex {
    // When true, addRead() records read names so they can be displayed in DOT output
    private final static boolean KEEP_TRACK_OF_READS = false;

    // Note that using an AtomicInteger is critical to allow multi-threaded HaplotypeCaller
    private static final AtomicInteger idCounter = new AtomicInteger(0);

    // Unique id assigned at construction time.  equals() and hashCode() are based
    // solely on this id, so two vertices with identical kmer sequences are still
    // distinct.  Declared final: identity must never change after construction.
    private final int id = idCounter.getAndIncrement();

    // Names of reads threaded through this vertex; only populated when
    // KEEP_TRACK_OF_READS is true.  Final: the list reference is never reassigned.
    private final List<String> reads = new LinkedList<String>();

    /**
     * Create a new MultiDeBruijnVertex with kmer sequence
     * @param sequence the kmer sequence
     */
    MultiDeBruijnVertex(byte[] sequence) {
        super(sequence);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        MultiDeBruijnVertex that = (MultiDeBruijnVertex) o;
        // identity is the unique id, not the kmer sequence
        return id == that.id;
    }

    @Override
    public String toString() {
        return "MultiDeBruijnVertex_id_" + id + "_seq_" + getSequenceString();
    }

    /**
     * Add name information to this vertex for debugging
     *
     * This information will be captured as a list of strings, and displayed in DOT if this
     * graph is written out to disk
     *
     * This functionality is only enabled when KEEP_TRACK_OF_READS is true
     *
     * @param name a non-null string
     * @throws IllegalArgumentException if name is null
     */
    protected void addRead(final String name) {
        if ( name == null ) throw new IllegalArgumentException("name cannot be null");
        if ( KEEP_TRACK_OF_READS ) reads.add(name);
    }

    // hashCode is consistent with equals: both use only the unique id
    @Override
    public int hashCode() { return id; }

    @Override
    public String additionalInfo() {
        // only show read names for non-reference vertices, and only when tracking is on
        return KEEP_TRACK_OF_READS ? (! reads.contains("ref") ? "__" + Utils.join(",", reads) : "") : "";
    }
}

View File

@ -0,0 +1,224 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.LocalAssemblyEngine;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.*;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import java.io.File;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
public class ReadThreadingAssembler extends LocalAssemblyEngine {
private final static Logger logger = Logger.getLogger(ReadThreadingAssembler.class);
// default cap on paths per graph, used by the testing-only no-arg constructor
private final static int DEFAULT_NUM_PATHS_PER_GRAPH = 128;
// artificial counts presumably injected in GGA (genotype-given-alleles) mode — TODO confirm against usage
private final static int GGA_MODE_ARTIFICIAL_COUNTS = 1000;
// how much to grow the kmer size on each retry iteration in assemble()
private final static int KMER_SIZE_ITERATION_INCREASE = 10;
// maximum number of kmer-size-increase retries attempted in assemble()
private final static int MAX_KMER_ITERATIONS_TO_ATTEMPT = 6;

/** The list of kmer sizes to try, in order, when building the graph. */
private final List<Integer> kmerSizes;
// cap on the number of paths allowed when assembling with read threading
private final int maxAllowedPathsForReadThreadingAssembler;
// when true, assemble() will not retry with larger kmer sizes after failures
private final boolean dontIncreaseKmerSizesForCycles;
// number of samples used for pruning — semantics defined by graph construction, not visible here
private final int numPruningSamples;
private boolean requireReasonableNumberOfPaths = false;
protected boolean removePathsNotConnectedToRef = true;
// testing hook: when true, return the raw (unsimplified) graph
private boolean justReturnRawGraph = false;
/** for testing only */
public ReadThreadingAssembler() {
this(DEFAULT_NUM_PATHS_PER_GRAPH, Arrays.asList(25));
}
public ReadThreadingAssembler(final int maxAllowedPathsForReadThreadingAssembler, final List<Integer> kmerSizes, final boolean dontIncreaseKmerSizesForCycles, final int numPruningSamples) {
super(maxAllowedPathsForReadThreadingAssembler);
this.kmerSizes = kmerSizes;
this.maxAllowedPathsForReadThreadingAssembler = maxAllowedPathsForReadThreadingAssembler;
this.dontIncreaseKmerSizesForCycles = dontIncreaseKmerSizesForCycles;
this.numPruningSamples = numPruningSamples;
}
public ReadThreadingAssembler(final int maxAllowedPathsForReadThreadingAssembler, final List<Integer> kmerSizes) {
this(maxAllowedPathsForReadThreadingAssembler, kmerSizes, true, 1);
}
/** for testing purposes */
protected void setJustReturnRawGraph(boolean justReturnRawGraph) {
this.justReturnRawGraph = justReturnRawGraph;
}
@Override
public List<SeqGraph> assemble(final List<GATKSAMRecord> reads, final Haplotype refHaplotype, final List<Haplotype> activeAlleleHaplotypes) {
final List<SeqGraph> graphs = new LinkedList<>();
// first, try using the requested kmer sizes
for ( final int kmerSize : kmerSizes ) {
final SeqGraph graph = createGraph(reads, refHaplotype, kmerSize, activeAlleleHaplotypes, dontIncreaseKmerSizesForCycles);
if ( graph != null )
graphs.add(graph);
}
// if none of those worked, iterate over larger sizes if allowed to do so
if ( graphs.isEmpty() && !dontIncreaseKmerSizesForCycles ) {
int kmerSize = MathUtils.arrayMaxInt(kmerSizes) + KMER_SIZE_ITERATION_INCREASE;
int numIterations = 1;
while ( graphs.isEmpty() && numIterations <= MAX_KMER_ITERATIONS_TO_ATTEMPT ) {
// on the last attempt we will allow low complexity graphs
final SeqGraph graph = createGraph(reads, refHaplotype, kmerSize, activeAlleleHaplotypes, numIterations == MAX_KMER_ITERATIONS_TO_ATTEMPT);
if ( graph != null )
graphs.add(graph);
kmerSize += KMER_SIZE_ITERATION_INCREASE;
numIterations++;
}
}
return graphs;
}
/**
* Creates the sequence graph for the given kmerSize
*
* @param reads reads to use
* @param refHaplotype reference haplotype
* @param kmerSize kmer size
* @param activeAlleleHaplotypes the GGA haplotypes to inject into the graph
* @param allowLowComplexityGraphs if true, do not check for low-complexity graphs
* @return sequence graph or null if one could not be created (e.g. because it contains cycles or too many paths or is low complexity)
*/
protected SeqGraph createGraph(final List<GATKSAMRecord> reads,
final Haplotype refHaplotype,
final int kmerSize,
final List<Haplotype> activeAlleleHaplotypes,
final boolean allowLowComplexityGraphs) {
final ReadThreadingGraph rtgraph = new ReadThreadingGraph(kmerSize, debugGraphTransformations, minBaseQualityToUseInAssembly, numPruningSamples);
// add the reference sequence to the graph
rtgraph.addSequence("ref", refHaplotype.getBases(), null, true);
// add the artificial GGA haplotypes to the graph
int hapCount = 0;
for ( final Haplotype h : activeAlleleHaplotypes ) {
final int[] counts = new int[h.length()];
Arrays.fill(counts, GGA_MODE_ARTIFICIAL_COUNTS);
rtgraph.addSequence("activeAllele" + hapCount++, h.getBases(), counts, false);
}
// Next pull kmers out of every read and throw them on the graph
for( final GATKSAMRecord read : reads ) {
rtgraph.addRead(read);
}
// actually build the read threading graph
rtgraph.buildGraphIfNecessary();
// sanity check: make sure there are no cycles in the graph
if ( rtgraph.hasCycles() ) {
if ( debug ) logger.info("Not using kmer size of " + kmerSize + " in read threading assembler because it contains a cycle");
return null;
}
// sanity check: make sure the graph had enough complexity with the given kmer
if ( ! allowLowComplexityGraphs && rtgraph.isLowComplexity() ) {
if ( debug ) logger.info("Not using kmer size of " + kmerSize + " in read threading assembler because it does not produce a graph with enough complexity");
return null;
}
printDebugGraphTransform(rtgraph, new File("sequenceGraph.0.0.raw_readthreading_graph.dot"));
// go through and prune all of the chains where all edges have <= pruneFactor. This must occur
// before recoverDanglingTails in the graph, so that we don't spend a ton of time recovering
// tails that we'll ultimately just trim away anyway, as the dangling tail edges have weight of 1
rtgraph.pruneLowWeightChains(pruneFactor);
// look at all chains in the graph that terminate in a non-ref node (dangling sinks) and see if
// we can recover them by merging some N bases from the chain back into the reference
if ( recoverDanglingTails ) rtgraph.recoverDanglingTails();
// remove all heading and trailing paths
if ( removePathsNotConnectedToRef ) rtgraph.removePathsNotConnectedToRef();
printDebugGraphTransform(rtgraph, new File("sequenceGraph.0.1.cleaned_readthreading_graph.dot"));
final SeqGraph initialSeqGraph = rtgraph.convertToSequenceGraph();
// if the unit tests don't want us to cleanup the graph, just return the raw sequence graph
if ( justReturnRawGraph ) return initialSeqGraph;
if ( debug ) logger.info("Using kmer size of " + rtgraph.getKmerSize() + " in read threading assembler");
printDebugGraphTransform(initialSeqGraph, new File("sequenceGraph.0.2.initial_seqgraph.dot"));
initialSeqGraph.cleanNonRefPaths(); // TODO -- I don't this is possible by construction
final SeqGraph seqGraph = cleanupSeqGraph(initialSeqGraph);
return ( seqGraph != null && requireReasonableNumberOfPaths && !reasonableNumberOfPaths(seqGraph) ) ? null : seqGraph;
}
/**
* Did we find a reasonable number of paths in this graph?
* @param graph
* @return
*/
private boolean reasonableNumberOfPaths(final SeqGraph graph) {
final KBestPaths<SeqVertex,BaseEdge> pathFinder = new KBestPaths<>(false);
final List<Path<SeqVertex,BaseEdge>> allPaths = pathFinder.getKBestPaths(graph, 100000);
logger.info("Found " + allPaths.size() + " paths through " + graph + " with maximum " + maxAllowedPathsForReadThreadingAssembler);
return allPaths.size() <= maxAllowedPathsForReadThreadingAssembler;
}
@Override
public String toString() {
return "ReadThreadingAssembler{" +
"kmerSizes=" + kmerSizes +
'}';
}
}

View File

@ -0,0 +1,785 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
 * 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute.org/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.KMerCounter;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.Kmer;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.*;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.smithwaterman.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.smithwaterman.SmithWaterman;
import org.jgrapht.EdgeFactory;
import org.jgrapht.alg.CycleDetector;
import java.io.File;
import java.util.*;
public class ReadThreadingGraph extends BaseGraph<MultiDeBruijnVertex, MultiSampleEdge> {
/**
* Edge factory that encapsulates the numPruningSamples assembly parameter
*/
private static class MyEdgeFactory implements EdgeFactory<MultiDeBruijnVertex, MultiSampleEdge> {
final int numPruningSamples;
public MyEdgeFactory(int numPruningSamples) {
this.numPruningSamples = numPruningSamples;
}
@Override
public MultiSampleEdge createEdge(final MultiDeBruijnVertex sourceVertex, final MultiDeBruijnVertex targetVertex) {
return new MultiSampleEdge(false, 1, numPruningSamples);
}
public MultiSampleEdge createEdge(final boolean isRef, final int multiplicity) {
return new MultiSampleEdge(isRef, multiplicity, numPruningSamples);
}
}
private final static Logger logger = Logger.getLogger(ReadThreadingGraph.class);

// sample name used for sequences added without an explicit sample
private final static String ANONYMOUS_SAMPLE = "XXX_UNNAMED_XXX";
// debugging switch: dump a .dot graph after every threaded sequence
private final static boolean WRITE_GRAPH = false;
// debugging switch: log details of the non-unique kmer computation
private final static boolean DEBUG_NON_UNIQUE_CALC = false;

/** for debugging info printing */
private static int counter = 0;

/**
 * Sequences added for read threading before we've actually built the graph
 */
private final Map<String, List<SequenceForKmers>> pending = new LinkedHashMap<>();

/**
 * A set of non-unique kmers that cannot be used as merge points in the graph
 */
private Set<Kmer> nonUniqueKmers;

/**
 * A map from kmers -> their corresponding vertex in the graph
 */
private Map<Kmer, MultiDeBruijnVertex> uniqueKmers = new LinkedHashMap<>();

/** The kmer size used to build this graph; fixed at construction time. */
final int kmerSize;
// when true, vertices record the names of the reads threaded through them (see threadSequence)
final boolean debugGraphTransformations;
// minimum base quality for assembly; presumably applied when kmerizing reads in addRead -- not visible here
final byte minBaseQualityToUseInAssembly;
// when true, also bump counts of edges leading INTO a sequence's starting vertex
protected boolean increaseCountsBackwards = true;
protected boolean increaseCountsThroughBranches = false; // this may increase the branches without bounds

// --------------------------------------------------------------------------------
// state variables, initialized in resetToInitialState()
// --------------------------------------------------------------------------------

// the kmer at which the reference sequence starts; set once while threading the ref
private Kmer refSource;
// true once buildGraphIfNecessary() has run; adding more sequences is then an error
private boolean alreadyBuilt;
/** Create a default graph: kmer size 25, no debug output, min base quality 6, one pruning sample. */
public ReadThreadingGraph() {
    this(25, false, (byte)6, 1);
}

/**
 * Create a graph with the given kmer size and default debug/quality/pruning settings.
 * @param kmerSize must be >= 1
 */
public ReadThreadingGraph(final int kmerSize) {
    this(kmerSize, false, (byte)6, 1);
}

/**
 * Create a new ReadThreadingAssembler using kmerSize for matching
 *
 * @param kmerSize must be >= 1
 * @param debugGraphTransformations if true, record per-vertex read provenance for debugging
 * @param minBaseQualityToUseInAssembly minimum base quality to use in assembly
 * @param numPruningSamples pruning parameter handed to the edge factory
 * @throws IllegalArgumentException if kmerSize < 1
 */
protected ReadThreadingGraph(final int kmerSize, final boolean debugGraphTransformations, final byte minBaseQualityToUseInAssembly, final int numPruningSamples) {
    super(kmerSize, new MyEdgeFactory(numPruningSamples));

    // fixed typo in the error message (was "bad minkKmerSize")
    if ( kmerSize < 1 ) throw new IllegalArgumentException("bad kmerSize " + kmerSize);

    this.kmerSize = kmerSize;
    this.debugGraphTransformations = debugGraphTransformations;
    this.minBaseQualityToUseInAssembly = minBaseQualityToUseInAssembly;
    resetToInitialState();
}
/**
 * Return this graph to its pre-build state, so another assembly can be run
 * with a different set of reads.
 */
private void resetToInitialState() {
    // forget build status and reference bookkeeping from any previous assembly
    alreadyBuilt = false;
    refSource = null;
    nonUniqueKmers = null;
    // and drop all previously added sequences and kmer -> vertex mappings
    uniqueKmers.clear();
    pending.clear();
}
/**
 * Add all bases in sequence to the graph
 * @param sequence a non-null sequence
 * @param isRef is this the reference sequence?
 */
protected void addSequence(final byte[] sequence, final boolean isRef) {
    addSequence("anonymous", sequence, null, isRef);
}

/**
 * Add all bases in sequence to this graph
 *
 * @see #addSequence(String, String, byte[], int, int, int[], boolean) for full information
 */
public void addSequence(final String seqName, final byte[] sequence, final int[] counts, final boolean isRef) {
    // delegate using the anonymous sample name and the full extent of the sequence
    addSequence(seqName, ANONYMOUS_SAMPLE, sequence, 0, sequence.length, counts, isRef);
}
/**
 * Queue a sequence of bases for threading into this graph.
 *
 * @param seqName a useful seqName for this read, for debugging purposes
 * @param sampleName the sample this sequence belongs to
 * @param sequence non-null sequence of bases
 * @param start the first base offset in sequence to use, inclusive
 * @param stop the last base offset in sequence to use, exclusive
 * @param counts per-base observation counts (supports reduced reads); may be null,
 *               meaning each base was observed once; otherwise length == sequence.length
 * @param isRef is this the reference sequence
 * @throws IllegalStateException if the graph has already been built
 */
public void addSequence(final String seqName, final String sampleName, final byte[] sequence, final int start, final int stop, final int[] counts, final boolean isRef) {
    // note that argument testing is taken care of in SequenceForKmers
    if ( alreadyBuilt ) throw new IllegalStateException("Graph already built");

    // fetch (or lazily create) the pending list for this sample, then append
    List<SequenceForKmers> perSampleList = pending.get(sampleName);
    if ( perSampleList == null ) {
        perSampleList = new LinkedList<>();
        pending.put(sampleName, perSampleList);
    }
    perSampleList.add(new SequenceForKmers(seqName, sequence, start, stop, counts, isRef));
}
/**
 * Return the observation count for the kmer beginning at kmerStart in seqForKmers.
 * The count attributed to a kmer is the count of its final base.
 *
 * @param seqForKmers a non-null sequence for kmers object
 * @param kmerStart the position where the kmer starts in sequence
 * @return a count for the kmer spanning [kmerStart, kmerStart + kmerSize) in seqForKmers
 */
private int getCountGivenKmerStart(final SequenceForKmers seqForKmers, final int kmerStart) {
    final int lastBaseOfKmer = kmerStart + kmerSize - 1;
    return seqForKmers.getCount(lastBaseOfKmer);
}
/**
 * Thread sequence seqForKmers through the current graph, updating the graph as appropriate
 * @param seqForKmers a non-null sequence
 */
private void threadSequence(final SequenceForKmers seqForKmers) {
    // locate an anchor vertex for this sequence; null means it cannot be threaded at all
    final Pair<MultiDeBruijnVertex,Integer> startingInfo = findStart(seqForKmers);
    if ( startingInfo == null )
        return;

    final MultiDeBruijnVertex startingVertex = startingInfo.getFirst();
    final int uniqueStartPos = startingInfo.getSecond();

    // increase the counts of all edges incoming into the starting vertex supported by going back in sequence
    if ( increaseCountsBackwards )
        increaseCountsInMatchedKmers(seqForKmers, startingVertex, startingVertex.getSequence(), kmerSize - 2);

    // record read provenance on the vertex when debugging is enabled
    if ( debugGraphTransformations ) startingVertex.addRead(seqForKmers.name);

    // keep track of information about the reference source; there must be exactly one ref sequence
    if ( seqForKmers.isRef ) {
        if ( refSource != null ) throw new IllegalStateException("Found two refSources! prev: " + refSource + ", new: " + startingVertex);
        refSource = new Kmer(seqForKmers.sequence, seqForKmers.start, kmerSize);
    }

    // loop over all of the bases in sequence, extending the graph by one base at each point, as appropriate
    MultiDeBruijnVertex vertex = startingVertex;
    for ( int i = uniqueStartPos + 1; i <= seqForKmers.stop - kmerSize; i++ ) {
        final int count = getCountGivenKmerStart(seqForKmers, i);
        vertex = extendChainByOne(vertex, seqForKmers.sequence, i, count, seqForKmers.isRef);
        if ( debugGraphTransformations ) vertex.addRead(seqForKmers.name);
    }
}
/**
 * Class to keep track of the important dangling tail merging data
 */
protected final class DanglingTailMergeResult {
    // vertices of the dangling (non-reference) path and of the reference path it may merge into
    final List<MultiDeBruijnVertex> danglingPath;
    final List<MultiDeBruijnVertex> referencePath;
    // the same two paths spelled out as base sequences
    final byte[] danglingPathString;
    final byte[] referencePathString;
    // the alignment of the dangling path against the reference path
    final Cigar cigar;

    public DanglingTailMergeResult(final List<MultiDeBruijnVertex> danglingPath,
                                   final List<MultiDeBruijnVertex> referencePath,
                                   final byte[] danglingPathString,
                                   final byte[] referencePathString,
                                   final Cigar cigar) {
        this.cigar = cigar;
        this.danglingPath = danglingPath;
        this.danglingPathString = danglingPathString;
        this.referencePath = referencePath;
        this.referencePathString = referencePathString;
    }
}
/**
 * Attempt to attach a dangling sink (vertex with out-degree == 0) back onto the reference path.
 *
 * @param vertex the vertex to recover
 * @return 1 if we successfully recovered the vertex and 0 otherwise
 * @throws IllegalStateException if vertex has outgoing edges
 */
protected int recoverDanglingChain(final MultiDeBruijnVertex vertex) {
    if ( outDegreeOf(vertex) != 0 ) throw new IllegalStateException("Attempting to recover a dangling tail for " + vertex + " but it has out-degree > 0");

    // align the dangling tail against the reference path via Smith-Waterman
    final DanglingTailMergeResult mergeResult = generateCigarAgainstReferencePath(vertex);
    if ( mergeResult == null )
        return 0;

    // reject merges whose alignment is too complex to trust
    if ( ! cigarIsOkayToMerge(mergeResult.cigar) )
        return 0;

    // perform the actual merge into the reference path
    return mergeDanglingTail(mergeResult);
}
/**
 * Determine whether the provided cigar is okay to merge into the reference path:
 * it may contain at most 3 elements and must end in a match (M) operation.
 *
 * @param cigar the cigar to analyze
 * @return true if it's okay to merge, false otherwise
 */
protected boolean cigarIsOkayToMerge(final Cigar cigar) {
    final List<CigarElement> elements = cigar.getCigarElements();
    final int numElements = elements.size();

    // don't allow more than a couple of different ops
    if ( numElements > 3 )
        return false;

    // the last element must be an M
    // TODO -- do we want to check whether the Ms mismatch too much also?
    final CigarElement finalElement = elements.get(numElements - 1);
    return finalElement.getOperator() == CigarOperator.M;
}
/**
 * Actually merge the dangling tail if possible
 *
 * @param danglingTailMergeResult the result from generating a Cigar for the dangling tail against the reference
 * @return 1 if merge was successful, 0 otherwise
 * @throws IllegalArgumentException if the cigar does not end in an M element
 */
protected int mergeDanglingTail(final DanglingTailMergeResult danglingTailMergeResult) {
    // cigarIsOkayToMerge() should guarantee this, but double check that the alignment ends in a match
    final List<CigarElement> elements = danglingTailMergeResult.cigar.getCigarElements();
    final CigarElement lastElement = elements.get(elements.size() - 1);
    if ( lastElement.getOperator() != CigarOperator.M )
        throw new IllegalArgumentException("The last Cigar element must be an M");

    // the merge point is determined by the longest exactly-matching suffix of the dangling path
    // against the reference path, capped by the length of the final M element
    final int lastRefIndex = danglingTailMergeResult.cigar.getReferenceLength() - 1;
    final int matchingSuffix = Math.min(GraphUtils.longestSuffixMatch(danglingTailMergeResult.referencePathString, danglingTailMergeResult.danglingPathString, lastRefIndex), lastElement.getLength());
    if ( matchingSuffix == 0 )
        return 0;

    // step back from the end of each path by the matching suffix length and connect the
    // corresponding dangling vertex to the reference vertex with a new non-reference edge
    final int altIndexToMerge = Math.max(danglingTailMergeResult.cigar.getReadLength() - matchingSuffix - 1, 0);
    final int refIndexToMerge = lastRefIndex - matchingSuffix + 1;
    addEdge(danglingTailMergeResult.danglingPath.get(altIndexToMerge), danglingTailMergeResult.referencePath.get(refIndexToMerge), ((MyEdgeFactory)getEdgeFactory()).createEdge(false, 1));
    return 1;
}
/**
 * Generates the CIGAR from the Smith-Waterman alignment of the dangling path (where the
 * provided vertex is the sink) against the corresponding reference path.
 *
 * @param vertex the sink of the dangling tail
 * @return the merge data, or null if no proper alignment could be generated
 */
protected DanglingTailMergeResult generateCigarAgainstReferencePath(final MultiDeBruijnVertex vertex) {
    // walk upwards from the dangling sink to the lowest common ancestor on the reference path
    final List<MultiDeBruijnVertex> altPath = findPathToLowestCommonAncestorOfReference(vertex);
    final boolean pathUnusable = altPath == null || isRefSource(altPath.get(0));
    if ( pathUnusable )
        return null;

    // the reference path starting at the same LCA
    final List<MultiDeBruijnVertex> refPath = getReferencePath(altPath.get(0));

    // spell both paths out as base strings for the aligner
    final byte[] referenceBases = getBasesForPath(refPath);
    final byte[] danglingBases = getBasesForPath(altPath);

    // run Smith-Waterman to determine the best alignment (and remove trailing deletions since they aren't interesting)
    final SmithWaterman alignment = new SWPairwiseAlignment(referenceBases, danglingBases, SWPairwiseAlignment.OVERHANG_STRATEGY.INDEL);
    return new DanglingTailMergeResult(altPath, refPath, danglingBases, referenceBases, AlignmentUtils.removeTrailingDeletions(alignment.getCigar()));
}
/**
 * Finds the path upwards in the graph from this vertex to the reference sequence,
 * including the lowest common ancestor vertex itself.
 *
 * @param vertex the original vertex
 * @return the path if it can be determined, or null if this vertex either doesn't merge onto
 *         the reference path or has an ancestor with multiple incoming edges before hitting it
 */
protected List<MultiDeBruijnVertex> findPathToLowestCommonAncestorOfReference(final MultiDeBruijnVertex vertex) {
    final LinkedList<MultiDeBruijnVertex> pathFromAncestor = new LinkedList<>();

    // climb through unambiguous (single-parent), non-reference ancestors, building the path front-first
    MultiDeBruijnVertex current = vertex;
    while ( ! isReferenceNode(current) && inDegreeOf(current) == 1 ) {
        pathFromAncestor.addFirst(current);
        current = getEdgeSource(incomingEdgeOf(current));
    }

    // include the vertex we stopped at; the path is only valid if that vertex is on the reference
    pathFromAncestor.addFirst(current);
    return isReferenceNode(current) ? pathFromAncestor : null;
}
/**
 * Finds the path downwards in the graph from this vertex to the reference sink, including this vertex.
 *
 * @param start the reference vertex to start from
 * @return the path (non-null, non-empty)
 * @throws IllegalArgumentException if start is not on the reference path
 */
protected List<MultiDeBruijnVertex> getReferencePath(final MultiDeBruijnVertex start) {
    if ( ! isReferenceNode(start) ) throw new IllegalArgumentException("Cannot construct the reference path from a vertex that is not on that path");

    // follow reference successors until we fall off the end of the reference path
    final List<MultiDeBruijnVertex> path = new ArrayList<>();
    for ( MultiDeBruijnVertex cursor = start; cursor != null; cursor = getNextReferenceVertex(cursor) ) {
        path.add(cursor);
    }
    return path;
}
/**
 * Build the read threaded assembly graph if it hasn't already been constructed from the sequences that have
 * been added to the graph.  Safe to call repeatedly; only the first call does any work.
 */
public void buildGraphIfNecessary() {
    if ( alreadyBuilt ) return;

    // determine the kmer size we'll use, and capture the set of nonUniques for that kmer size
    final NonUniqueResult result = determineKmerSizeAndNonUniques(kmerSize, kmerSize);
    nonUniqueKmers = result.nonUniques;

    if ( DEBUG_NON_UNIQUE_CALC ) {
        logger.info("using " + kmerSize + " kmer size for this assembly with the following non-uniques");
    }

    // go through the pending sequences, and add them to the graph
    for ( final List<SequenceForKmers> sequencesForSample : pending.values() ) {
        for ( final SequenceForKmers sequenceForKmers : sequencesForSample ) {
            threadSequence(sequenceForKmers);
            if ( WRITE_GRAPH ) printGraph(new File("threading." + counter++ + "." + sequenceForKmers.name.replace(" ", "_") + ".dot"), 0);
        }

        // flush the single sample edge values from the graph; done once per sample so that
        // per-sample multiplicities are folded in before the next sample's reads are threaded
        for ( final MultiSampleEdge e : edgeSet() ) e.flushSingleSampleMultiplicity();
    }

    // clear the pending sequences (no longer needed) and mark the graph as built
    pending.clear();
    alreadyBuilt = true;
}
/**
 * @return true if the graph has cycles, false otherwise
 */
public boolean hasCycles() {
    final CycleDetector<MultiDeBruijnVertex, MultiSampleEdge> cycleDetector = new CycleDetector<>(this);
    return cycleDetector.detectCycles();
}
/**
 * Does the graph not have enough complexity?  We define low complexity as a situation where the
 * number of non-unique kmers is more than 20% of the total number of kmers.
 *
 * @return true if the graph has low complexity, false otherwise
 */
public boolean isLowComplexity() {
    final int numNonUnique = nonUniqueKmers.size();
    final int numUnique = uniqueKmers.size();
    // nonUnique > 20% of (unique + nonUnique)  <=>  4 * nonUnique > unique
    return numNonUnique * 4 > numUnique;
}
/**
 * Try to recover every dangling tail in the graph (non-reference-sink vertices with no
 * outgoing edges) by merging them back onto the reference path.
 *
 * @throws IllegalStateException if the graph has not been built yet
 */
public void recoverDanglingTails() {
    if ( ! alreadyBuilt ) throw new IllegalStateException("recoverDanglingTails requires the graph be already built");

    int numAttempted = 0;
    int numRecovered = 0;
    for ( final MultiDeBruijnVertex v : vertexSet() ) {
        final boolean isDanglingSink = outDegreeOf(v) == 0 && ! isRefNodeAndRefSink(v);
        if ( isDanglingSink ) {
            numAttempted++;
            numRecovered += recoverDanglingChain(v);
        }
    }

    if ( debugGraphTransformations ) logger.info("Recovered " + numRecovered + " of " + numAttempted + " dangling tails");
}
/** structure that keeps track of the non-unique kmers for a given kmer size */
private static class NonUniqueResult {
    // the kmer size these non-uniques were computed for
    final int kmerSize;
    // kmers that occurred more than once within at least one sequence
    final Set<Kmer> nonUniques;

    private NonUniqueResult(final Set<Kmer> nonUniques, final int kmerSize) {
        this.kmerSize = kmerSize;
        this.nonUniques = nonUniques;
    }
}
/**
 * Compute the smallest kmer size >= minKmerSize and <= maxKmerSize that has no non-unique kmers
 * among all sequences added to the current graph.  Will always return a result for maxKmerSize if
 * all smaller kmers had non-unique kmers.
 *
 * @param minKmerSize the minimum kmer size to consider when constructing the graph
 * @param maxKmerSize the maximum kmer size to consider
 * @return a non-null NonUniqueResult
 */
protected NonUniqueResult determineKmerSizeAndNonUniques(final int minKmerSize, final int maxKmerSize) {
    // start by assuming every pending sequence may contain non-unique kmers
    final Collection<SequenceForKmers> withNonUniques = getAllPendingSequences();
    final Set<Kmer> nonUniqueKmers = new HashSet<Kmer>();

    // go through the sequences and determine which kmers aren't unique within each read
    int kmerSize = minKmerSize;
    for ( ; kmerSize <= maxKmerSize; kmerSize++) {
        // clear out set of non-unique kmers
        nonUniqueKmers.clear();

        // loop over all sequences that have non-unique kmers in them from the previous iterator
        final Iterator<SequenceForKmers> it = withNonUniques.iterator();
        while ( it.hasNext() ) {
            final SequenceForKmers sequenceForKmers = it.next();

            // determine the non-unique kmers for this sequence
            final Collection<Kmer> nonUniquesFromSeq = determineNonUniqueKmers(sequenceForKmers, kmerSize);
            if ( nonUniquesFromSeq.isEmpty() ) {
                // remove this sequence from future consideration: a sequence with no non-unique
                // kmers at this size cannot have any at a larger size either
                it.remove();
            } else {
                // keep track of the non-uniques for this kmerSize, and keep it in the list of sequences that have non-uniques
                nonUniqueKmers.addAll(nonUniquesFromSeq);
            }
        }

        if ( nonUniqueKmers.isEmpty() )
            // this kmerSize produces no non-unique sequences, so go ahead and use it for our assembly
            break;
    }

    // necessary because the loop breaks with kmerSize = max + 1 when no size was clean
    return new NonUniqueResult(nonUniqueKmers, Math.min(kmerSize, maxKmerSize));
}
/**
 * Get the collection of all sequences for kmers across all samples in no particular order
 * @return non-null Collection
 */
private Collection<SequenceForKmers> getAllPendingSequences() {
    final LinkedList<SequenceForKmers> allPending = new LinkedList<SequenceForKmers>();
    for ( final List<SequenceForKmers> sequencesForOneSample : pending.values() ) {
        allPending.addAll(sequencesForOneSample);
    }
    return allPending;
}
/**
 * Get the collection of non-unique kmers from sequence for kmer size kmerSize
 * @param seqForKmers a sequence to get kmers from
 * @param kmerSize the size of the kmers
 * @return a non-null collection of non-unique kmers in sequence
 */
private Collection<Kmer> determineNonUniqueKmers(final SequenceForKmers seqForKmers, final int kmerSize) {
    // tally how many times each kmer occurs within this single sequence
    final KMerCounter kmerCounter = new KMerCounter(kmerSize);
    final int lastKmerStart = seqForKmers.stop - kmerSize;
    for ( int kmerStart = 0; kmerStart <= lastKmerStart; kmerStart++ ) {
        kmerCounter.addKmer(new Kmer(seqForKmers.sequence, kmerStart, kmerSize), 1);
    }
    // any kmer observed twice or more within the sequence is non-unique
    return kmerCounter.getKmersWithCountsAtLeast(2);
}
/**
 * Convert this kmer graph to a simple sequence graph.
 *
 * Each kmer suffix shows up as a distinct SeqVertex, attached in the same structure as in the kmer
 * graph.  Nodes that are sources are mapped to SeqVertex nodes that contain all of their sequence
 *
 * @return a newly allocated SequenceGraph
 */
// TODO -- should override base class method
public SeqGraph convertToSequenceGraph() {
    buildGraphIfNecessary();

    final SeqGraph seqGraph = new SeqGraph(kmerSize);
    final Map<MultiDeBruijnVertex, SeqVertex> vertexMap = new HashMap<MultiDeBruijnVertex, SeqVertex>();

    // first pass: mirror every kmer vertex as an equivalent sequence vertex
    for ( final MultiDeBruijnVertex kmerVertex : vertexSet() ) {
        final SeqVertex seqVertex = new SeqVertex(kmerVertex.getAdditionalSequence(isSource(kmerVertex)));
        seqVertex.setAdditionalInfo(kmerVertex.additionalInfo());
        vertexMap.put(kmerVertex, seqVertex);
        seqGraph.addVertex(seqVertex);
    }

    // second pass: mirror every edge between the corresponding sequence vertices
    for ( final MultiSampleEdge kmerEdge : edgeSet() ) {
        final SeqVertex from = vertexMap.get(getEdgeSource(kmerEdge));
        final SeqVertex to = vertexMap.get(getEdgeTarget(kmerEdge));
        seqGraph.addEdge(from, to, new BaseEdge(kmerEdge.isRef(), kmerEdge.getMultiplicity()));
    }

    return seqGraph;
}
/**
 * Walk backwards from vertex along the path spelled by originalKmer, incrementing the
 * multiplicity of each incoming edge whose source vertex suffix matches the corresponding base.
 *
 * Recursion terminates when offset reaches -1 (the whole kmer has been consumed) or when
 * no incoming edge matches the expected base.
 *
 * @param seqForKmers the sequence whose per-base counts are added to matching edges
 * @param vertex the vertex to walk backwards from
 * @param originalKmer the kmer bases being matched against vertex suffixes
 * @param offset the index into originalKmer currently being matched; -1 terminates the walk
 */
private void increaseCountsInMatchedKmers(final SequenceForKmers seqForKmers,
final MultiDeBruijnVertex vertex,
final byte[] originalKmer,
final int offset) {
if ( offset == -1 ) return;
for ( final MultiSampleEdge edge : incomingEdgesOf(vertex) ) {
final MultiDeBruijnVertex prev = getEdgeSource(edge);
final byte suffix = prev.getSuffix();
final byte seqBase = originalKmer[offset];
// logger.warn(String.format("Increasing counts for %s -> %s via %s at %d with suffix %s vs. %s",
// prev, vertex, edge, offset, (char)suffix, (char)seqBase));
// only follow edges whose source spells the expected base; unless
// increaseCountsThroughBranches is set, refuse to walk through branch points (in-degree > 1)
if ( suffix == seqBase && (increaseCountsThroughBranches || inDegreeOf(vertex) == 1) ) {
edge.incMultiplicity(seqForKmers.getCount(offset));
increaseCountsInMatchedKmers(seqForKmers, prev, originalKmer, offset-1);
}
}
}
/**
 * Find vertex and its position in seqForKmers where we should start assembling seqForKmers
 *
 * @param seqForKmers the sequence we want to thread into the graph
 * @return a pair of the starting vertex and its position in seqForKmer, or null if no start could be found
 */
private Pair<MultiDeBruijnVertex, Integer> findStart(final SequenceForKmers seqForKmers) {
    // the reference always anchors at its first base; other sequences must anchor at a unique kmer
    final int startPos = seqForKmers.isRef
            ? 0
            : findUniqueStartPosition(seqForKmers.sequence, seqForKmers.start, seqForKmers.stop);
    return startPos == -1 ? null : getOrCreateKmerVertex(seqForKmers.sequence, startPos, true);
}
/**
 * Find a starting point in sequence that begins a unique kmer among all kmers in the graph
 * @param sequence the sequence of bases
 * @param start the first base to use in sequence
 * @param stop the last base to use in sequence
 * @return the index into sequence that begins a unique kmer of size kmerSize, or -1 if none could be found
 */
private int findUniqueStartPosition(final byte[] sequence, final int start, final int stop) {
// NOTE(review): the bound here is i < stop - kmerSize, whereas determineNonUniqueKmers
// iterates i <= stop - kmerSize, so the final kmer position is never considered as a
// start anchor -- confirm whether this asymmetry is intentional
for ( int i = start; i < stop - kmerSize; i++ ) {
final Kmer kmer1 = new Kmer(sequence, i, kmerSize);
// kmers present in uniqueKmers occur exactly once in the graph, so they anchor unambiguously
if ( uniqueKmers.containsKey(kmer1) )
return i;
}
return -1;
}
/**
 * Get the vertex for the kmer in sequence starting at start, creating a fresh vertex if
 * no unique vertex for that kmer already exists.
 *
 * @param sequence the sequence
 * @param start the position of the kmer start
 * @param allowRefSource if true, we will allow matches to the kmer that represents the reference starting kmer
 * @return a non-null pair of (vertex, start)
 */
private Pair<MultiDeBruijnVertex, Integer> getOrCreateKmerVertex(final byte[] sequence, final int start, final boolean allowRefSource) {
    final Kmer kmer = new Kmer(sequence, start, kmerSize);
    final MultiDeBruijnVertex existing = getUniqueKmerVertex(kmer, allowRefSource);
    // reuse the unique vertex when there is one; otherwise mint a new vertex for this kmer
    final MultiDeBruijnVertex vertex = existing != null ? existing : createVertex(kmer);
    return new Pair<>(vertex, start);
}
/**
 * Get the unique vertex for kmer, or null if not possible.
 *
 * @param kmer the kmer to look up
 * @param allowRefSource if true, we will allow kmer to match the reference source vertex
 * @return a vertex for kmer, or null if it's not unique
 */
private MultiDeBruijnVertex getUniqueKmerVertex(final Kmer kmer, final boolean allowRefSource) {
    // the reference source kmer is off-limits unless explicitly allowed
    if ( kmer.equals(refSource) && ! allowRefSource )
        return null;
    return uniqueKmers.get(kmer);
}
/**
 * Create a new vertex for kmer.  Add it to the uniqueKmers map if appropriate.
 *
 * kmer must not have a entry in unique kmers, or an error will be thrown
 *
 * @param kmer the kmer we want to create a vertex for
 * @return the non-null created vertex
 */
private MultiDeBruijnVertex createVertex(final Kmer kmer) {
    final MultiDeBruijnVertex newVertex = new MultiDeBruijnVertex(kmer.bases());

    // make sure we aren't adding duplicates (would be a bug)
    final int nVerticesBefore = vertexSet().size();
    addVertex(newVertex);
    if ( vertexSet().size() != nVerticesBefore + 1 )
        throw new IllegalStateException("Adding vertex " + newVertex + " to graph didn't increase the graph size");

    // add the vertex to the unique kmer map, if it is in fact unique
    if ( ! nonUniqueKmers.contains(kmer) && ! uniqueKmers.containsKey(kmer) ) // TODO -- not sure this last test is necessary
        uniqueKmers.put(kmer, newVertex);

    return newVertex;
}
/**
 * Workhorse routine of the assembler.  Given a sequence whose last vertex is anchored in the graph, extend
 * the graph one bp according to the bases in sequence.
 *
 * @param prevVertex a non-null vertex where sequence was last anchored in the graph
 * @param sequence the sequence we're threading through the graph
 * @param kmerStart the start of the current kmer in graph we'd like to add
 * @param count the number of observations of this kmer in graph (can be > 1 for reduced reads)
 * @param isRef is this the reference sequence?
 * @return a non-null vertex connecting prevVertex to in the graph based on sequence
 */
private MultiDeBruijnVertex extendChainByOne(final MultiDeBruijnVertex prevVertex, final byte[] sequence, final int kmerStart, final int count, final boolean isRef) {
    final int nextPos = kmerStart + kmerSize - 1;

    // first look for an existing outgoing edge whose target already spells the next base
    for ( final MultiSampleEdge existingEdge : outgoingEdgesOf(prevVertex) ) {
        final MultiDeBruijnVertex candidate = getEdgeTarget(existingEdge);
        if ( candidate.getSuffix() == sequence[nextPos] ) {
            // matched an existing chain: just bump the edge multiplicity and keep going
            existingEdge.incMultiplicity(count);
            return candidate;
        }
    }

    // no outgoing edge matched our suffix base, so check for an opportunity to merge back in
    final Kmer kmer = new Kmer(sequence, kmerStart, kmerSize);
    final MultiDeBruijnVertex mergeVertex = getUniqueKmerVertex(kmer, false);

    // the reference path must never merge into an existing unique vertex
    if ( isRef && mergeVertex != null )
        throw new IllegalStateException("Found a unique vertex to merge into the reference graph " + prevVertex + " -> " + mergeVertex);

    // either merge into the unique vertex, or grow the chain with a brand-new vertex
    final MultiDeBruijnVertex nextVertex = mergeVertex != null ? mergeVertex : createVertex(kmer);
    addEdge(prevVertex, nextVertex, ((MyEdgeFactory)getEdgeFactory()).createEdge(isRef, count));
    return nextVertex;
}
/**
 * Add the given read to the sequence graph.  Ultimately the read will get sent through addSequence(), but first
 * this method ensures we only use high quality bases and accounts for reduced reads, etc.
 *
 * Scans the read for maximal runs of usable bases (see baseIsUsableForAssembly) and adds each
 * run of at least kmerSize bases to the graph as its own sequence.
 *
 * @param read a non-null read
 */
protected void addRead(final GATKSAMRecord read) {
    final byte[] sequence = read.getReadBases();
    final byte[] qualities = read.getBaseQualities();
    final int[] reducedReadCounts = read.getReducedReadCounts(); // will be null if read is not reduced

    int lastGood = -1; // index of the first base of the current run of usable bases, or -1 if not in a run
    // iterate one past the end so the final run is flushed by the end-of-sequence check
    for( int end = 0; end <= sequence.length; end++ ) {
        if ( end == sequence.length || ! baseIsUsableForAssembly(sequence[end], qualities[end]) ) {
            // the run of usable bases (if any) covers [start, stop)
            final int start = lastGood;
            final int stop = end; // exclusive; the original's ternary (end == length ? length : end) was a no-op
            // bug fix: the run length is stop - start, not stop - start + 1 (stop is exclusive);
            // the old +1 allowed runs one base too short to pass the kmerSize check
            final int len = stop - start;
            if ( start != -1 && len >= kmerSize ) {
                // if the sequence is long enough to get some value out of, add it to the graph
                final String name = read.getReadName() + "_" + start + "_" + end;
                addSequence(name, read.getReadGroup().getSample(), read.getReadBases(), start, stop, reducedReadCounts, false);
            }
            lastGood = -1; // reset the last good base
        } else if ( lastGood == -1 ) {
            lastGood = end; // we're at a good base, the last good one is us
        }
    }
}
/**
 * Determines whether a base can safely be used for assembly.
 * Currently disallows Ns and/or those with low quality
 *
 * @param base the base under consideration
 * @param qual the quality of that base
 * @return true if the base can be used for assembly, false otherwise
 */
protected boolean baseIsUsableForAssembly(final byte base, final byte qual) {
    final boolean isCalledBase = base != BaseUtils.Base.N.base;
    final boolean hasGoodQuality = qual >= minBaseQualityToUseInAssembly;
    return isCalledBase && hasGoodQuality;
}
/**
 * Get the set of non-unique kmers in this graph.  For debugging purposes
 *
 * Note this returns the internal set directly (not a defensive copy), so callers
 * must not modify it.
 *
 * @return a non-null set of kmers
 */
protected Set<Kmer> getNonUniqueKmers() {
return nonUniqueKmers;
}
@Override
public String toString() {
// NOTE(review): the label "ReadThreadingAssembler" is hard-coded; confirm it actually
// matches this class's name (the enclosing class declaration is not visible here)
return "ReadThreadingAssembler{" +
"kmerSize=" + kmerSize +
'}';
}
}

View File

@ -0,0 +1,93 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading;
/**
 * Keeps track of the information needed to add a sequence to the read threading assembly graph
 *
 * User: depristo
 * Date: 4/18/13
 * Time: 8:59 AM
 */
final class SequenceForKmers {
    /** Human-readable name of this sequence (used for labeling / debugging) */
    final String name;
    /** The bases of this sequence */
    final byte[] sequence;
    /** Offsets delimiting the usable portion of sequence; stop appears to be exclusive (kmer loops run to stop - kmerSize) -- confirm */
    final int start, stop;
    /** Per-base observation counts for reduced reads; null means every base was observed exactly once */
    final private int[] counts;
    /** Is this sequence the reference sequence? */
    final boolean isRef;

    /**
     * Create a new sequence for creating kmers
     *
     * @param name a name for this sequence
     * @param sequence the non-null bases of the sequence
     * @param start the first usable offset in sequence; must be >= 0
     * @param stop the last usable offset; must be >= start
     * @param counts per-base counts matching sequence length, or null for unit counts
     * @param ref true if this is the reference sequence
     * @throws IllegalArgumentException if any argument violates the constraints above
     */
    SequenceForKmers(final String name, final byte[] sequence, final int start, final int stop, final int[] counts, final boolean ref) {
        if ( start < 0 ) throw new IllegalArgumentException("Invalid start " + start);
        if ( stop < start ) throw new IllegalArgumentException("Invalid stop " + stop);
        if ( sequence == null ) throw new IllegalArgumentException("Sequence is null ");
        if ( counts != null && counts.length != sequence.length ) throw new IllegalArgumentException("Sequence and counts don't have the same length " + sequence.length + " vs " + counts.length);

        this.name = name;
        this.sequence = sequence;
        this.start = start;
        this.stop = stop;
        this.isRef = ref;
        this.counts = counts;
    }

    /**
     * Get the number of observations of the kmer starting at i in this sequence
     *
     * Can be > 1 because sequence may be a reduced read and therefore count as N observations
     *
     * @param i the offset into sequence for the start of the kmer; must be a valid index
     * @return a count >= 1 that indicates the number of observations of kmer starting at i in this sequence.
     * @throws ArrayIndexOutOfBoundsException if i is not a valid index into sequence
     */
    public int getCount(final int i) {
        // bug fix: the upper bound must reject i == sequence.length too -- the old check
        // (i > sequence.length) let that value through, silently returning 1 when counts
        // was null and crashing without this message when counts was non-null
        if ( i < 0 || i >= sequence.length ) throw new ArrayIndexOutOfBoundsException("i must be >= 0 and < " + sequence.length + " but got " + i);
        return counts == null ? 1 : counts[i];
    }
}

View File

@ -212,6 +212,15 @@ public class ConstrainedMateFixingManager {
public int getNReadsInQueue() { return waitingReads.size(); }
/**
* For testing purposes only
*
* @return the list of reads currently in the queue
*/
protected List<SAMRecord> getReadsInQueueForTesting() {
return new ArrayList<SAMRecord>(waitingReads);
}
public boolean canMoveReads(GenomeLoc earliestPosition) {
if ( DEBUG ) logger.info("Refusing to realign? " + earliestPosition + " vs. " + lastLocFlushed);
@ -233,7 +242,7 @@ public class ConstrainedMateFixingManager {
addRead(newRead, modifiedReads.contains(newRead), false);
}
private void addRead(SAMRecord newRead, boolean readWasModified, boolean canFlush) {
protected void addRead(SAMRecord newRead, boolean readWasModified, boolean canFlush) {
if ( DEBUG ) logger.info("New read pos " + newRead.getAlignmentStart() + " OP = " + newRead.getAttribute("OP") + " " + readWasModified);
//final long curTime = timer.currentTime();
@ -265,7 +274,7 @@ public class ConstrainedMateFixingManager {
// fix mates, as needed
// Since setMateInfo can move reads, we potentially need to remove the mate, and requeue
// it to ensure proper sorting
if ( newRead.getReadPairedFlag() ) {
if ( newRead.getReadPairedFlag() && !newRead.getNotPrimaryAlignmentFlag() ) {
SAMRecordHashObject mate = forMateMatching.get(newRead.getReadName());
if ( mate != null ) {
// 1. Frustratingly, Picard's setMateInfo() method unaligns (by setting the reference contig

View File

@ -54,6 +54,7 @@ import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.pairhmm.Log10PairHMM;
import org.broadinstitute.sting.utils.pairhmm.LoglessPairHMM;
import org.broadinstitute.sting.utils.pairhmm.PairHMM;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
@ -78,8 +79,6 @@ public class PairHMMIndelErrorModel {
private static final double baseMatchArray[];
private static final double baseMismatchArray[];
private final static double LOG_ONE_HALF;
private static final int START_HRUN_GAP_IDX = 4;
private static final int MAX_HRUN_GAP_IDX = 20;
@ -97,8 +96,6 @@ public class PairHMMIndelErrorModel {
/////////////////////////////
static {
LOG_ONE_HALF= -Math.log10(2.0);
baseMatchArray = new double[MAX_CACHED_QUAL+1];
baseMismatchArray = new double[MAX_CACHED_QUAL+1];
for (int k=1; k <= MAX_CACHED_QUAL; k++) {
@ -120,12 +117,11 @@ public class PairHMMIndelErrorModel {
case ORIGINAL:
pairHMM = new Log10PairHMM(false);
break;
case LOGLESS_CACHING: //TODO: still not tested so please do not use yet
//pairHMM = new LoglessCachingPairHMM(); //TODO - add it back when the figure out how to use the protected LoglessCachingPairHMM class
throw new UserException.BadArgumentValue("pairHMM"," this option (LOGLESS_CACHING in UG) is still under development");
//break;
case LOGLESS_CACHING:
pairHMM = new LoglessPairHMM();
break;
default:
throw new UserException.BadArgumentValue("pairHMM", "Specified pairHMM implementation is unrecognized or incompatible with the UnifiedGenotyper. Acceptable options are ORIGINAL, EXACT or LOGLESS_CACHING (the third option is still under development).");
throw new UserException.BadArgumentValue("pairHMM", "Specified pairHMM implementation is unrecognized or incompatible with the UnifiedGenotyper. Acceptable options are ORIGINAL, EXACT or LOGLESS_CACHING.");
}
// fill gap penalty table, affine naive model:
@ -466,7 +462,7 @@ public class PairHMMIndelErrorModel {
final double li = readLikelihoods[readIdx][i];
final double lj = readLikelihoods[readIdx][j];
final int readCount = readCounts[readIdx];
haplotypeLikehoodMatrix[i][j] += readCount * (MathUtils.approximateLog10SumLog10(li, lj) + LOG_ONE_HALF);
haplotypeLikehoodMatrix[i][j] += readCount * (MathUtils.approximateLog10SumLog10(li, lj) + MathUtils.LOG_ONE_HALF);
}
}
}

View File

@ -47,7 +47,6 @@
package org.broadinstitute.sting.gatk.walkers.variantrecalibration;
import Jama.Matrix;
import cern.jet.random.Normal;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.broadinstitute.sting.utils.MathUtils;
@ -243,12 +242,10 @@ public class GaussianMixtureModel {
public Double evaluateDatumInOneDimension( final VariantDatum datum, final int iii ) {
if(datum.isNull[iii]) { return null; }
final Normal normal = new Normal(0.0, 1.0, null);
final double[] pVarInGaussianLog10 = new double[gaussians.size()];
int gaussianIndex = 0;
for( final MultivariateGaussian gaussian : gaussians ) {
normal.setState( gaussian.mu[iii], gaussian.sigma.get(iii, iii) );
pVarInGaussianLog10[gaussianIndex++] = gaussian.pMixtureLog10 + Math.log10( normal.pdf( datum.annotations[iii] ) );
pVarInGaussianLog10[gaussianIndex++] = gaussian.pMixtureLog10 + MathUtils.normalDistributionLog10(gaussian.mu[iii], gaussian.sigma.get(iii, iii), datum.annotations[iii]);
}
return MathUtils.log10sumLog10(pVarInGaussianLog10); // Sum(pi_k * p(v|n,k))
}

View File

@ -80,18 +80,18 @@ class AllHaplotypeBAMWriter extends HaplotypeBAMWriter {
final List<Haplotype> bestHaplotypes,
final Set<Haplotype> calledHaplotypes,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
writeHaplotypesAsReads(haplotypes, new HashSet<Haplotype>(bestHaplotypes), paddedReferenceLoc);
writeHaplotypesAsReads(haplotypes, new HashSet<>(bestHaplotypes), paddedReferenceLoc);
// we need to remap the Alleles back to the Haplotypes; inefficient but unfortunately this is a requirement currently
final Map<Allele, Haplotype> alleleToHaplotypeMap = new HashMap<Allele, Haplotype>(haplotypes.size());
final Map<Allele, Haplotype> alleleToHaplotypeMap = new HashMap<>(haplotypes.size());
for ( final Haplotype haplotype : haplotypes )
alleleToHaplotypeMap.put(Allele.create(haplotype.getBases()), haplotype);
// next, output the interesting reads for each sample aligned against the appropriate haplotype
for ( final PerReadAlleleLikelihoodMap readAlleleLikelihoodMap : stratifiedReadMap.values() ) {
for ( Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : readAlleleLikelihoodMap.getLikelihoodReadMap().entrySet() ) {
for ( final Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : readAlleleLikelihoodMap.getLikelihoodReadMap().entrySet() ) {
final MostLikelyAllele bestAllele = PerReadAlleleLikelihoodMap.getMostLikelyAllele(entry.getValue());
writeReadAgainstHaplotype(entry.getKey(), alleleToHaplotypeMap.get(bestAllele.getMostLikelyAllele()), paddedReferenceLoc.getStart());
writeReadAgainstHaplotype(entry.getKey(), alleleToHaplotypeMap.get(bestAllele.getMostLikelyAllele()), paddedReferenceLoc.getStart(), bestAllele.isInformative());
}
}
}

View File

@ -87,7 +87,7 @@ class CalledHaplotypeBAMWriter extends HaplotypeBAMWriter {
writeHaplotypesAsReads(calledHaplotypes, calledHaplotypes, paddedReferenceLoc);
// we need to remap the Alleles back to the Haplotypes; inefficient but unfortunately this is a requirement currently
final Map<Allele, Haplotype> alleleToHaplotypeMap = new HashMap<Allele, Haplotype>(haplotypes.size());
final Map<Allele, Haplotype> alleleToHaplotypeMap = new HashMap<>(haplotypes.size());
for ( final Haplotype haplotype : calledHaplotypes ) {
alleleToHaplotypeMap.put(Allele.create(haplotype.getBases()), haplotype);
}
@ -97,10 +97,10 @@ class CalledHaplotypeBAMWriter extends HaplotypeBAMWriter {
// next, output the interesting reads for each sample aligned against one of the called haplotypes
for ( final PerReadAlleleLikelihoodMap readAlleleLikelihoodMap : stratifiedReadMap.values() ) {
for ( Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : readAlleleLikelihoodMap.getLikelihoodReadMap().entrySet() ) {
for ( final Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : readAlleleLikelihoodMap.getLikelihoodReadMap().entrySet() ) {
if ( entry.getKey().getMappingQuality() > 0 ) {
final MostLikelyAllele bestAllele = PerReadAlleleLikelihoodMap.getMostLikelyAllele(entry.getValue(), allelesOfCalledHaplotypes);
writeReadAgainstHaplotype(entry.getKey(), alleleToHaplotypeMap.get(bestAllele.getMostLikelyAllele()), paddedReferenceLoc.getStart());
writeReadAgainstHaplotype(entry.getKey(), alleleToHaplotypeMap.get(bestAllele.getMostLikelyAllele()), paddedReferenceLoc.getStart(), bestAllele.isInformative());
}
}
}

View File

@ -185,11 +185,13 @@ public abstract class HaplotypeBAMWriter {
* @param originalRead the read we want to write aligned to the reference genome
* @param haplotype the haplotype that the read should be aligned to, before aligning to the reference
* @param referenceStart the start of the reference that haplotype is aligned to. Provides global coordinate frame.
* @param isInformative true if the read is differentially informative for one of the haplotypes
*/
protected void writeReadAgainstHaplotype(final GATKSAMRecord originalRead,
final Haplotype haplotype,
final int referenceStart) {
final GATKSAMRecord alignedToRef = createReadAlignedToRef(originalRead, haplotype, referenceStart);
final int referenceStart,
final boolean isInformative) {
final GATKSAMRecord alignedToRef = createReadAlignedToRef(originalRead, haplotype, referenceStart, isInformative);
if ( alignedToRef != null )
bamWriter.addAlignment(alignedToRef);
}
@ -201,11 +203,13 @@ public abstract class HaplotypeBAMWriter {
* @param originalRead the read we want to write aligned to the reference genome
* @param haplotype the haplotype that the read should be aligned to, before aligning to the reference
* @param referenceStart the start of the reference that haplotype is aligned to. Provides global coordinate frame.
* @param isInformative true if the read is differentially informative for one of the haplotypes
* @return a GATKSAMRecord aligned to reference, or null if no meaningful alignment is possible
*/
protected GATKSAMRecord createReadAlignedToRef(final GATKSAMRecord originalRead,
final Haplotype haplotype,
final int referenceStart) {
final int referenceStart,
final boolean isInformative) {
if ( originalRead == null ) throw new IllegalArgumentException("originalRead cannot be null");
if ( haplotype == null ) throw new IllegalArgumentException("haplotype cannot be null");
if ( haplotype.getCigar() == null ) throw new IllegalArgumentException("Haplotype cigar not set " + haplotype);
@ -225,6 +229,10 @@ public abstract class HaplotypeBAMWriter {
addHaplotypeTag(read, haplotype);
// uninformative reads are set to zero mapping quality to enhance visualization
if ( !isInformative )
read.setMappingQuality(0);
// compute here the read starts w.r.t. the reference from the SW result and the hap -> ref cigar
final Cigar extendedHaplotypeCigar = haplotype.getConsolidatedPaddedCigar(1000);
final int readStartOnHaplotype = AlignmentUtils.calcFirstBaseMatchingReferenceInCigar(extendedHaplotypeCigar, swPairwiseAlignment.getAlignmentStart2wrt1());

View File

@ -55,7 +55,7 @@ import org.broadinstitute.sting.utils.QualityUtils;
* User: rpoplin, carneiro
* Date: 10/16/12
*/
public final class LoglessPairHMM extends PairHMM {
public final class LoglessPairHMM extends N2MemoryPairHMM {
protected static final double INITIAL_CONDITION = Math.pow(2, 1020);
protected static final double INITIAL_CONDITION_LOG10 = Math.log10(INITIAL_CONDITION);
@ -99,8 +99,13 @@ public final class LoglessPairHMM extends PairHMM {
}
}
if ( ! constantsAreInitialized || recacheReadValues )
initializeProbabilities(insertionGOP, deletionGOP, overallGCP);
if ( ! constantsAreInitialized || recacheReadValues ) {
initializeProbabilities(transition, insertionGOP, deletionGOP, overallGCP);
// note that we initialized the constants
constantsAreInitialized = true;
}
initializePriors(haplotypeBases, readBases, readQuals, hapStartIndex);
for (int i = 1; i < paddedReadLength; i++) {
@ -159,7 +164,7 @@ public final class LoglessPairHMM extends PairHMM {
"overallGCP != null"
})
@Ensures("constantsAreInitialized")
private void initializeProbabilities(final byte[] insertionGOP, final byte[] deletionGOP, final byte[] overallGCP) {
protected static void initializeProbabilities(final double[][] transition, final byte[] insertionGOP, final byte[] deletionGOP, final byte[] overallGCP) {
for (int i = 0; i < insertionGOP.length; i++) {
final int qualIndexGOP = Math.min(insertionGOP[i] + deletionGOP[i], Byte.MAX_VALUE);
transition[i+1][matchToMatch] = QualityUtils.qualToProb((byte) qualIndexGOP);
@ -169,9 +174,6 @@ public final class LoglessPairHMM extends PairHMM {
transition[i+1][matchToDeletion] = QualityUtils.qualToErrorProb(deletionGOP[i]);
transition[i+1][deletionToDeletion] = QualityUtils.qualToErrorProb(overallGCP[i]);
}
// note that we initialized the constants
constantsAreInitialized = true;
}
/**

View File

@ -0,0 +1,162 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.pairhmm;
import net.sf.samtools.SAMUtils;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.text.XReadLines;
import java.io.*;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.GZIPInputStream;
/**
 * Single-class carrier of test data for PairHMM implementations (for use in
 * benchmarking and unit tests): one reference haplotype, one read, the four
 * per-base quality arrays, and the expected log10 likelihood of the pair.
 *
 * User: depristo
 * Date: 5/12/13
 */
public class PairHMMTestData {
    /** Reference haplotype bases. */
    public final String ref;
    /** Read bases; access through {@link #getRead()}. */
    private final String read;
    /** Per-base base/insertion/deletion qualities and the gap-continuation penalty. */
    public final byte[] baseQuals, insQuals, delQuals, gcp;
    /** Expected log10 likelihood for this ref/read pair; -1 when not known. */
    public final double log10l;

    /**
     * Full constructor: all quality arrays and the expected likelihood are given explicitly.
     */
    PairHMMTestData(String ref, String read, byte[] baseQuals, byte[] insQuals, byte[] delQuals, byte[] gcp, double log10l) {
        this.ref = ref;
        this.read = read;
        this.baseQuals = baseQuals;
        this.insQuals = insQuals;
        this.delQuals = delQuals;
        this.gcp = gcp;
        this.log10l = log10l;
    }

    /**
     * Convenience constructor: every base, insertion and deletion quality is {@code qual},
     * the gap-continuation penalty is fixed at 10, and the expected likelihood is unknown (-1).
     */
    PairHMMTestData(String ref, String read, final byte qual) {
        this.ref = ref;
        this.read = read;
        this.baseQuals = this.insQuals = this.delQuals = Utils.dupBytes(qual, read.length());
        this.gcp = Utils.dupBytes((byte) 10, read.length());
        this.log10l = -1;
    }

    /**
     * Initializes {@code hmm} for this datum's dimensions and computes the read
     * likelihood against the reference haplotype.
     *
     * @param hmm the HMM implementation to exercise
     * @return the log10 likelihood reported by the HMM
     */
    public double runHMM(final PairHMM hmm) {
        hmm.initialize(getRead().length(), ref.length());
        return hmm.computeReadLikelihoodGivenHaplotypeLog10(ref.getBytes(), getRead().getBytes(),
                baseQuals, insQuals, delQuals, gcp, true);
    }

    @Override
    public String toString() {
        // fix: previously printed the stale class name "Info" left over from a refactoring
        return "PairHMMTestData{" +
                "ref='" + ref + '\'' +
                ", read='" + getRead() + '\'' +
                ", log10l=" + log10l +
                '}';
    }

    /**
     * Runs {@code hmm} over every datum in {@code data}.
     *
     * @param hmm       the HMM implementation to exercise
     * @param data      the test data to run; may be null or empty, in which case nothing happens
     * @param runSingly if true each datum re-initializes the HMM; otherwise the HMM is
     *                  initialized once from the first read's length and the maximum
     *                  haplotype length (batch mode)
     */
    public static void runHMMs(final PairHMM hmm, final List<PairHMMTestData> data, final boolean runSingly) {
        // fix: batch mode previously threw IndexOutOfBoundsException on an empty list (data.get(0))
        if ( data == null || data.isEmpty() )
            return;

        if ( runSingly ) {
            for ( final PairHMMTestData datum : data )
                datum.runHMM(hmm);
        } else {
            // batch mode: initialize once with the first read's length and the longest haplotype
            final PairHMMTestData first = data.get(0);
            final int maxHaplotypeLen = calcMaxHaplotypeLen(data);
            hmm.initialize(first.getRead().length(), maxHaplotypeLen);
            for ( final PairHMMTestData datum : data ) {
                hmm.computeReadLikelihoodGivenHaplotypeLog10(datum.ref.getBytes(), datum.getRead().getBytes(),
                        datum.baseQuals, datum.insQuals, datum.delQuals, datum.gcp, false);
            }
        }
    }

    /**
     * @param data the test data to scan
     * @return the length of the longest reference haplotype in {@code data} (0 when empty)
     */
    public static int calcMaxHaplotypeLen(final List<PairHMMTestData> data) {
        int maxHaplotypeLen = 0;
        for ( final PairHMMTestData datum : data )
            maxHaplotypeLen = Math.max(maxHaplotypeLen, datum.ref.length());
        return maxHaplotypeLen;
    }

    /**
     * Reads test data from {@code file}; gzip compression is detected by a ".gz" extension.
     * Each line holds 7 space-separated fields: ref, read, four fastq-encoded quality
     * strings, and the expected log10 likelihood.
     *
     * @param file the input file to parse
     * @return a map from read string to all test data sharing that read, in file order
     * @throws IOException on any read failure
     */
    public static Map<String, List<PairHMMTestData>> readLikelihoods(final File file) throws IOException {
        final Map<String, List<PairHMMTestData>> results = new LinkedHashMap<>();

        InputStream in = new FileInputStream(file);
        if ( file.getName().endsWith(".gz") ) {
            in = new GZIPInputStream(in);
        }

        // NOTE(review): assumes XReadLines closes the stream when iteration completes --
        // confirm, otherwise this leaks a file handle
        for ( final String line : new XReadLines(in) ) {
            final String[] parts = line.split(" ");
            final PairHMMTestData info = new PairHMMTestData(
                    parts[0], parts[1],
                    SAMUtils.fastqToPhred(parts[2]),
                    SAMUtils.fastqToPhred(parts[3]),
                    SAMUtils.fastqToPhred(parts[4]),
                    SAMUtils.fastqToPhred(parts[5]),
                    Double.parseDouble(parts[6]));

            if ( ! results.containsKey(info.read) ) {
                results.put(info.read, new LinkedList<PairHMMTestData>());
            }
            results.get(info.read).add(info);
        }

        return results;
    }

    public String getRead() {
        return read;
    }
}

View File

@ -70,9 +70,7 @@ import org.broadinstitute.sting.utils.sam.GATKSAMReadGroupRecord;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.*;
import java.util.*;
/**
@ -223,6 +221,150 @@ public class RecalUtils {
}
}
/**
 * Helper that serializes recalibration reports as csv for consumption by external
 * analysis tools (e.g. the R plot-generation scripts).
 * <p/>
 * The csv header row is emitted as soon as the printer is constructed. After that,
 * call {@link #print(RecalibrationReport, String)} once per report to append it,
 * and finish by calling {@link #close()}.
 */
private static class CsvPrinter {

    private final PrintStream outStream;
    private final Covariate[] covs;

    /**
     * Creates a printer that writes into a file.
     *
     * @param out destination file; created (or truncated) on construction.
     * @param c   covariates whose values should be printed.
     * @throws FileNotFoundException when the file cannot be opened for writing.
     */
    protected CsvPrinter(final File out, final Covariate ... c)
            throws FileNotFoundException {
        this(new FileOutputStream(out), c);
    }

    /**
     * Creates a printer that writes into an arbitrary stream.
     *
     * @param os destination stream.
     * @param c  covariates whose values should be printed; a null array is
     *           treated as empty, and the array is defensively copied.
     */
    protected CsvPrinter(final OutputStream os, final Covariate ... c) {
        if (c == null) {
            covs = new Covariate[0];
        } else {
            covs = c.clone();
        }
        outStream = new PrintStream(os);
        printHeader();  // the header row goes out exactly once, at construction time
    }

    /** Emits the csv header row; intended to run only from the constructor. */
    protected void printHeader() {
        RecalUtils.printHeader(outStream);
    }

    /**
     * Appends one report to the csv output.
     *
     * @param report the report to serialize.
     * @param mode   the mode label associated with the report (typically
     *               ORIGINAL or RECALIBRATED).
     */
    public void print(final RecalibrationReport report, final String mode) {
        RecalUtils.writeCSV(outStream, report.getRecalibrationTables(), mode, covs, false);
    }

    /** Closes the underlying stream; no further output is possible afterwards. */
    public void close() {
        outStream.close();
    }
}
/**
 * Builds a csv output printer writing into a file.
 *
 * @param out the output file; it will be overwritten.
 * @param c   list of covariates to print out; must not be null.
 *
 * @throws FileNotFoundException if <code>out</code> could not be created anew.
 *
 * @return never <code>null</code>
 */
protected static CsvPrinter csvPrinter(final File out, final Covariate ... c)
        throws FileNotFoundException {
    if (c != null) {
        return new CsvPrinter(out, c);
    }
    throw new IllegalArgumentException("the input covariate array cannot be null");
}
/**
 * Prints a collection of reports into a file in csv format in a way that can be
 * used by R scripts (such as the plot generator script).
 * <p/>
 * The set of covariates printed is the minimum common set across all reports,
 * matched by simple class name and kept in the order of the first report.
 *
 * @param out     the output file; it will be overwritten.
 * @param reports map where keys are the unique 'mode' (ORIGINAL, RECALIBRATED, ...)
 *                of each report and the corresponding value the report itself.
 * @throws FileNotFoundException if <code>out</code> could not be created anew.
 */
public static void generateCsv(final File out, final Map<String, RecalibrationReport> reports)
        throws FileNotFoundException {
    if (reports.isEmpty()) {
        // nothing to intersect; emit a header-only file
        writeCsv(out, reports, new Covariate[0]);
        return;
    }

    final Iterator<RecalibrationReport> rit = reports.values().iterator();

    // seed with the first report's covariates; LinkedHashSet preserves their order
    final Set<Covariate> covariates = new LinkedHashSet<>();
    Utils.addAll(covariates, rit.next().getRequestedCovariates());

    // intersect with each remaining report; stop early if the set is already empty
    while (rit.hasNext() && !covariates.isEmpty()) {
        final Covariate[] nextCovariates = rit.next().getRequestedCovariates();
        final Set<String> nextCovariateNames = new LinkedHashSet<>(nextCovariates.length);
        for (final Covariate nc : nextCovariates) {
            nextCovariateNames.add(nc.getClass().getSimpleName());
        }
        final Iterator<Covariate> cit = covariates.iterator();
        while (cit.hasNext()) {
            if (!nextCovariateNames.contains(cit.next().getClass().getSimpleName())) {
                cit.remove();
            }
        }
    }
    writeCsv(out, reports, covariates.toArray(new Covariate[covariates.size()]));
}
/**
 * Prints a collection of reports into a file in csv format in a way
 * that can be used by R scripts (such as the plot generator script).
 *
 * @param out     the output file; it will be overwritten.
 * @param reports map where keys are the unique 'mode' (ORIGINAL, RECALIBRATED, ...)
 *                of each report and the corresponding value the report itself.
 * @param c       the covariates to print out.
 * @throws FileNotFoundException if <code>out</code> could not be created anew.
 */
private static void writeCsv(final File out,
        final Map<String, RecalibrationReport> reports, final Covariate[] c)
        throws FileNotFoundException {
    final CsvPrinter printer = csvPrinter(out, c);
    try {
        for (final Map.Entry<String, RecalibrationReport> entry : reports.entrySet()) {
            printer.print(entry.getValue(), entry.getKey());
        }
    } finally {
        // close even when printing fails so the underlying file handle is not leaked
        printer.close();
    }
}
public enum SOLID_RECAL_MODE {
/**
* Treat reference inserted bases as reference matching bases. Very unsafe!
@ -390,36 +532,66 @@ public class RecalUtils {
report.print(outputFile);
}
private static void outputRecalibrationPlot(final RecalibrationArgumentCollection RAC) {
/**
* Write recalibration plots into a file
*
* @param csvFile location of the intermediary file
* @param exampleReportFile where the report arguments are collected from.
* @param output result plot file name.
*/
public static void generatePlots(final File csvFile, final File exampleReportFile, final File output) {
final RScriptExecutor executor = new RScriptExecutor();
executor.setExceptOnError(true);
executor.addScript(new Resource(SCRIPT_FILE, RecalUtils.class));
executor.addArgs(RAC.RECAL_CSV_FILE.getAbsolutePath());
executor.addArgs(RAC.RECAL_TABLE_FILE.getAbsolutePath());
executor.addArgs(RAC.RECAL_PDF_FILE.getAbsolutePath());
executor.addArgs(csvFile.getAbsolutePath());
executor.addArgs(exampleReportFile.getAbsolutePath());
executor.addArgs(output.getAbsolutePath());
Logger.getLogger(RecalUtils.class).debug("R command line: " + executor.getApproximateCommandLine());
executor.exec();
}
/**
 * Runs the recalibration R plotting script over an already-generated csv file.
 *
 * @param csvFile the intermediate csv file handed to the R script
 * @param RAC     argument collection supplying the recalibration table path
 */
private static void outputRecalibrationPlot(final File csvFile, final RecalibrationArgumentCollection RAC) {
    final RScriptExecutor executor = new RScriptExecutor();
    executor.addScript(new Resource(SCRIPT_FILE, RecalUtils.class));
    // script arguments: csv path first, then the recalibration table path
    executor.addArgs(csvFile.getAbsolutePath());
    executor.addArgs(RAC.RECAL_TABLE_FILE.getAbsolutePath());
    // NOTE(review): no setExceptOnError(true) here, unlike generatePlots -- R failures
    // appear to be tolerated in this path; confirm that is intended
    executor.exec();
}
/**
* Please use {@link #generateCsv(java.io.File, java.util.Map)} and {@link #generatePlots(java.io.File, java.io.File, java.io.File)} instead.
*
* @deprecated
*/
@Deprecated
public static void generateRecalibrationPlot(final RecalibrationArgumentCollection RAC, final RecalibrationTables original, final Covariate[] requestedCovariates) {
generateRecalibrationPlot(RAC, original, null, requestedCovariates);
}
/**
* Please use {@link #generateCsv(java.io.File, java.util.Map)} and {@link #generatePlots(java.io.File, java.io.File, java.io.File)} instead.
*
* @deprecated
*/
@Deprecated
public static void generateRecalibrationPlot(final RecalibrationArgumentCollection RAC, final RecalibrationTables original, final RecalibrationTables recalibrated, final Covariate[] requestedCovariates) {
final PrintStream csvFile;
final PrintStream csvStream;
final File csvTempFile = null;
try {
if ( RAC.RECAL_CSV_FILE == null ) {
RAC.RECAL_CSV_FILE = File.createTempFile("BQSR", ".csv");
RAC.RECAL_CSV_FILE.deleteOnExit();
}
csvFile = new PrintStream(RAC.RECAL_CSV_FILE);
File csvTmpFile = File.createTempFile("BQSR",".csv");
csvTmpFile.deleteOnExit();
csvStream = new PrintStream(csvTmpFile);
} catch (IOException e) {
throw new UserException.CouldNotCreateOutputFile(RAC.RECAL_CSV_FILE, e);
throw new UserException("Could not create temporary csv file", e);
}
if ( recalibrated != null )
writeCSV(csvFile, recalibrated, "RECALIBRATED", requestedCovariates, true);
writeCSV(csvFile, original, "ORIGINAL", requestedCovariates, recalibrated == null);
outputRecalibrationPlot(RAC);
writeCSV(csvStream, recalibrated, "RECALIBRATED", requestedCovariates, true);
writeCSV(csvStream, original, "ORIGINAL", requestedCovariates, recalibrated == null);
csvStream.close();
outputRecalibrationPlot(csvTempFile, RAC);
csvTempFile.delete();
}
private static void writeCSV(final PrintStream deltaTableFile, final RecalibrationTables recalibrationTables, final String recalibrationMode, final Covariate[] requestedCovariates, final boolean printHeader) {
@ -452,18 +624,7 @@ public class RecalUtils {
// output the csv file
if (printHeader) {
final List<String> header = new LinkedList<String>();
header.add("ReadGroup");
header.add("CovariateValue");
header.add("CovariateName");
header.add("EventType");
header.add("Observations");
header.add("Errors");
header.add("EmpiricalQuality");
header.add("AverageReportedQuality");
header.add("Accuracy");
header.add("Recalibration");
deltaTableFile.println(Utils.join(",", header));
printHeader(deltaTableFile);
}
final Map<Covariate, String> covariateNameMap = new HashMap<Covariate, String>(requestedCovariates.length);
@ -480,6 +641,21 @@ public class RecalUtils {
}
}
/**
 * Writes the fixed csv header row (the column names shared by all report rows).
 *
 * @param out stream that receives the header line
 */
private static void printHeader(PrintStream out) {
    final List<String> header = Arrays.asList(
            "ReadGroup",
            "CovariateValue",
            "CovariateName",
            "EventType",
            "Observations",
            "Errors",
            "EmpiricalQuality",
            "AverageReportedQuality",
            "Accuracy",
            "Recalibration");
    out.println(Utils.join(",", header));
}
/*
* Return an initialized nested integer array with appropriate dimensions for use with the delta tables
*

View File

@ -340,9 +340,6 @@ public class RecalibrationReport {
else if (argument.equals("recalibration_report"))
RAC.existingRecalibrationReport = (value == null) ? null : new File((String) value);
else if (argument.equals("plot_pdf_file"))
RAC.RECAL_PDF_FILE = (value == null) ? null : new File((String) value);
else if (argument.equals("binary_tag_name"))
RAC.BINARY_TAG_NAME = (value == null) ? null : (String) value;
@ -369,6 +366,11 @@ public class RecalibrationReport {
return RAC;
}
/**
 * Returns the covariates requested for this recalibration report.
 *
 * @deprecated use {@link #getRequestedCovariates()} instead.
 */
@Deprecated
public Covariate[] getCovariates() {
    return requestedCovariates;
}

View File

@ -67,6 +67,8 @@ import java.util.ArrayList;
public class ContextCovariate implements StandardCovariate {
private final static Logger logger = Logger.getLogger(ContextCovariate.class);
private int mismatchesContextSize;
private int indelsContextSize;

View File

@ -93,10 +93,13 @@ public class ReadGroupCovariate implements RequiredCovariate {
private final HashMap<String, Integer> readGroupLookupTable = new HashMap<String, Integer>();
private final HashMap<Integer, String> readGroupReverseLookupTable = new HashMap<Integer, String>();
private int nextId = 0;
private String forceReadGroup;
// Initialize any member variables using the command-line arguments passed to the walkers
@Override
public void initialize(final RecalibrationArgumentCollection RAC) {}
public void initialize(final RecalibrationArgumentCollection RAC) {
forceReadGroup = RAC.FORCE_READGROUP;
}
@Override
public void recordValues(final GATKSAMRecord read, final ReadCovariates values) {
@ -170,6 +173,9 @@ public class ReadGroupCovariate implements RequiredCovariate {
* @return platform unit or readgroup id
*/
/**
 * Picks the read-group value to record for a read group: a forced read group
 * (when configured) wins, then the platform unit, then the read group id.
 */
private String readGroupValueFromRG(final GATKSAMReadGroupRecord rg) {
    if (forceReadGroup != null) {
        return forceReadGroup;
    }
    final String pu = rg.getPlatformUnit();
    if (pu != null) {
        return pu;
    }
    return rg.getId();
}

View File

@ -0,0 +1,151 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.annotator;
import org.broadinstitute.sting.gatk.walkers.compression.reducereads.*;
import org.broadinstitute.sting.gatk.walkers.compression.reducereads.BaseCounts;
import org.broadinstitute.sting.utils.MannWhitneyU;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
 * Unit test probing how reducing observations (collapsing them to their average
 * quality via {@link BaseCounts}) affects the Mann-Whitney U rank-sum test.
 */
public class RankSumUnitTest {
    // Synthetic quality-score distributions: values near 20, values near 30,
    // and a bimodal mix of values near 20 and near 40.
    List<Integer> distribution20, distribution30, distribution20_40;
    static final int observations = 100;

    @BeforeClass
    public void init() {
        distribution20 = new ArrayList<>(observations);
        distribution30 = new ArrayList<>(observations);
        distribution20_40 = new ArrayList<>(observations);
        final int skew = 3;
        makeDistribution(distribution20, 20, skew, observations);
        makeDistribution(distribution30, 30, skew, observations);
        makeDistribution(distribution20_40, 20, skew, observations/2);
        makeDistribution(distribution20_40, 40, skew, observations/2);
        // shuffle the observations
        Collections.shuffle(distribution20);
        Collections.shuffle(distribution30);
        Collections.shuffle(distribution20_40);
    }

    /**
     * Appends {@code numObservations} values to {@code result}, cycling
     * deterministically through the inclusive range [target - skew, target + skew].
     *
     * @param result          list the generated values are appended to
     * @param target          center of the generated value range
     * @param skew            half-width of the generated value range
     * @param numObservations number of values to append
     */
    private static void makeDistribution(final List<Integer> result, final int target, final int skew, final int numObservations) {
        final int rangeStart = target - skew;
        final int rangeEnd = target + skew;
        int current = rangeStart;
        for ( int i = 0; i < numObservations; i++ ) {
            result.add(current++);
            if ( current > rangeEnd )
                current = rangeStart;
        }
    }

    @DataProvider(name = "DistributionData")
    public Object[][] makeDistributionData() {
        // NOTE: diamond operator used for consistency with the rest of this file
        List<Object[]> tests = new ArrayList<>();
        for ( final int numToReduce : Arrays.asList(0, 10, 50, 100) ) {
            tests.add(new Object[]{distribution20, distribution20, numToReduce, true, "20-20"});
            tests.add(new Object[]{distribution30, distribution30, numToReduce, true, "30-30"});
            tests.add(new Object[]{distribution20_40, distribution20_40, numToReduce, true, "20/40-20/40"});
            tests.add(new Object[]{distribution20, distribution30, numToReduce, false, "20-30"});
            tests.add(new Object[]{distribution30, distribution20, numToReduce, false, "30-20"});
            tests.add(new Object[]{distribution20, distribution20_40, numToReduce, false, "20-20/40"});
            tests.add(new Object[]{distribution30, distribution20_40, numToReduce, true, "30-20/40"});
        }
        return tests.toArray(new Object[][]{});
    }

    /**
     * Runs the two-sided Mann-Whitney U test on distribution1 vs. distribution2,
     * after first collapsing {@code numToReduceIn2} observations of distribution2
     * into their (BaseCounts-averaged) quality, and checks the resulting p-value
     * against the expected equal / not-equal verdict.
     */
    @Test(enabled = true, dataProvider = "DistributionData")
    public void testDistribution(final List<Integer> distribution1, final List<Integer> distribution2, final int numToReduceIn2, final boolean distributionsShouldBeEqual, final String debugString) {
        final MannWhitneyU mannWhitneyU = new MannWhitneyU(true);
        for ( final Integer num : distribution1 )
            mannWhitneyU.add(num, MannWhitneyU.USet.SET1);
        final List<Integer> dist2 = new ArrayList<>(distribution2);
        if ( numToReduceIn2 > 0 ) {
            // Collapse the first numToReduceIn2 observations into BaseCounts and
            // replace them with copies of their average quality, mimicking read reduction.
            // NOTE: class is explicitly imported, no need for the fully-qualified name
            final BaseCounts counts = new BaseCounts();
            for ( int i = 0; i < numToReduceIn2; i++ ) {
                final int value = dist2.remove(0);
                counts.incr(BaseIndex.A, (byte)value, 0, false);
            }
            final int qual = (int)counts.averageQualsOfBase(BaseIndex.A);
            for ( int i = 0; i < numToReduceIn2; i++ )
                dist2.add(qual);
        }
        for ( final Integer num : dist2 )
            mannWhitneyU.add(num, MannWhitneyU.USet.SET2);
        final Double result = mannWhitneyU.runTwoSidedTest().second;
        Assert.assertFalse(Double.isNaN(result));
        if ( distributionsShouldBeEqual ) {
            // TODO -- THIS IS THE FAILURE POINT OF USING REDUCED READS WITH RANK SUM TESTS
            if ( numToReduceIn2 >= observations / 2 )
                return;
            Assert.assertTrue(result > 0.1, String.format("%f %d %d", result, numToReduceIn2, dist2.get(0)));
        } else {
            Assert.assertTrue(result < 0.01, String.format("%f %d %d", result, numToReduceIn2, dist2.get(0)));
        }
    }
}

View File

@ -78,7 +78,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testHasAnnotsAsking1() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard --variant " + privateTestDir + "vcfexample2.vcf -I " + validationDataLocation + "low_coverage_CEU.chr1.10k-11k.bam -L 1:10,020,000-10,021,000", 1,
Arrays.asList("fbfbd4d13b7ba3d76e8e186902e81378"));
Arrays.asList("823868a4b5b5ec2cdf080c059d04d31a"));
executeTest("test file has annotations, asking for annotations, #1", spec);
}
@ -86,7 +86,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testHasAnnotsAsking2() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard --variant " + privateTestDir + "vcfexample3.vcf -I " + validationDataLocation + "NA12878.1kg.p2.chr1_10mb_11_mb.SLX.bam -L 1:10,000,000-10,050,000", 1,
Arrays.asList("19aef8914efc497192f89a9038310ca5"));
Arrays.asList("213560f395280e6a066d0b0497ce8881"));
executeTest("test file has annotations, asking for annotations, #2", spec);
}
@ -112,7 +112,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testNoAnnotsAsking1() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard --variant " + privateTestDir + "vcfexample2empty.vcf -I " + validationDataLocation + "low_coverage_CEU.chr1.10k-11k.bam -L 1:10,020,000-10,021,000", 1,
Arrays.asList("4f0b8033da18e6cf6e9b8d5d36c21ba2"));
Arrays.asList("6f873b3152db291e18e3a04fbce2e117"));
executeTest("test file doesn't have annotations, asking for annotations, #1", spec);
}
@ -120,7 +120,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testNoAnnotsAsking2() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard --variant " + privateTestDir + "vcfexample3empty.vcf -I " + validationDataLocation + "NA12878.1kg.p2.chr1_10mb_11_mb.SLX.bam -L 1:10,000,000-10,050,000", 1,
Arrays.asList("64ca176d587dfa2b3b9dec9f7999305c"));
Arrays.asList("d8089c5874ff35a7fd7e35ebd7d3b137"));
executeTest("test file doesn't have annotations, asking for annotations, #2", spec);
}
@ -128,7 +128,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testExcludeAnnotations() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard -XA FisherStrand -XA ReadPosRankSumTest --variant " + privateTestDir + "vcfexample2empty.vcf -I " + validationDataLocation + "low_coverage_CEU.chr1.10k-11k.bam -L 1:10,020,000-10,021,000", 1,
Arrays.asList("f33f417fad98c05d9cd08ffa22943b0f"));
Arrays.asList("552c2ad9dbfaa85d51d2def93c8229c6"));
executeTest("test exclude annotations", spec);
}
@ -136,7 +136,7 @@ public class VariantAnnotatorIntegrationTest extends WalkerTest {
public void testOverwritingHeader() {
WalkerTestSpec spec = new WalkerTestSpec(
baseTestString() + " -G Standard --variant " + privateTestDir + "vcfexample4.vcf -I " + validationDataLocation + "NA12878.1kg.p2.chr1_10mb_11_mb.SLX.bam -L 1:10,001,292", 1,
Arrays.asList("0c810f6c4abef9d9dc5513ca872d3d22"));
Arrays.asList("0ed4c7760f6e7a158b6d743d257300f3"));
executeTest("test overwriting header", spec);
}

View File

@ -0,0 +1,164 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.annotator;
import net.sf.picard.reference.IndexedFastaSequenceFile;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.commandline.RodBinding;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.variant.GATKVariantContextUtils;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import org.broadinstitute.variant.vcf.VCFConstants;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.*;
/**
 * Unit tests for {@link VariantOverlapAnnotator}: rsID annotation from dbSNP
 * records and boolean overlap annotations from arbitrary ROD bindings.
 */
public class VariantOverlapAnnotatorUnitTest extends BaseTest {
    private GenomeLocParser locParser;
    private IndexedFastaSequenceFile reference;

    @BeforeClass
    public void setup() throws FileNotFoundException {
        // Load the b37 reference once for the whole class and build a location parser from it.
        reference = new CachingIndexedFastaSequenceFile(new File(b37KGReference));
        locParser = new GenomeLocParser(reference);
    }

    /** Creates a variant context at 20:10 with the given source, ID, and allele strings. */
    private VariantContext makeVC(final String source, final String id, final List<String> alleles) {
        final VariantContext base = GATKVariantContextUtils.makeFromAlleles(source, "20", 10, alleles);
        return new VariantContextBuilder(base).id(id).make();
    }

    /**
     * Builds an annotator with an optional dbSNP binding plus one binding per overlap name,
     * each overlap's binding name matching its annotation name.
     */
    private VariantOverlapAnnotator makeAnnotator(final String dbSNP, final String ... overlaps) {
        final Map<RodBinding<VariantContext>, String> bindings = new LinkedHashMap<>();
        for ( final String overlapName : overlaps ) {
            bindings.put(new RodBinding<>(VariantContext.class, overlapName), overlapName);
        }
        final RodBinding<VariantContext> dbSNPBinding;
        if ( dbSNP == null ) {
            dbSNPBinding = null;
        } else {
            dbSNPBinding = new RodBinding<>(VariantContext.class, dbSNP);
        }
        return new VariantOverlapAnnotator(dbSNPBinding, bindings, locParser);
    }

    @Test
    public void testCreateWithSpecialNames() {
        // The annotation names should be reported as given, independent of the binding names.
        final List<String> names = Arrays.asList("X", "Y", "Z");
        final Map<RodBinding<VariantContext>, String> bindings = new LinkedHashMap<>();
        for ( final String name : names ) {
            bindings.put(new RodBinding<>(VariantContext.class, name + "Binding"), name);
        }
        final VariantOverlapAnnotator annotator = new VariantOverlapAnnotator(null, bindings, locParser);
        Assert.assertEquals(annotator.getOverlapNames(), names);
    }

    @DataProvider(name = "AnnotateRsIDData")
    public Object[][] makeAnnotateRsIDData() {
        final List<Object[]> cases = new ArrayList<>();

        // Calls with and without pre-existing IDs.
        final VariantContext callNoIDAC = makeVC("call", VCFConstants.EMPTY_ID_FIELD, Arrays.asList("A", "C"));
        final VariantContext callNoIDAT = makeVC("call", VCFConstants.EMPTY_ID_FIELD, Arrays.asList("A", "T"));
        final VariantContext callIDAC = makeVC("call", "foo", Arrays.asList("A", "C"));
        final VariantContext callExistingIDAC = makeVC("call", "rsID1", Arrays.asList("A", "C"));

        // dbSNP records with various allele sets and IDs.
        final VariantContext dbSNP_AC = makeVC("DBSNP", "rsID1", Arrays.asList("A", "C"));
        final VariantContext dbSNP_AT = makeVC("DBSNP", "rsID2", Arrays.asList("A", "T"));
        final VariantContext dbSNP_AG = makeVC("DBSNP", "rsID3", Arrays.asList("A", "G"));
        final VariantContext dbSNP_AC_AT = makeVC("DBSNP", "rsID1;rsID2", Arrays.asList("A", "C", "T"));
        final VariantContext dbSNP_AC_AG = makeVC("DBSNP", "rsID1;rsID3", Arrays.asList("A", "C", "G"));

        // Biallelic call vs. single dbSNP record.
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AC), dbSNP_AC.getID(), true});
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AT), VCFConstants.EMPTY_ID_FIELD, false});
        cases.add(new Object[]{callIDAC, Arrays.asList(dbSNP_AC), "foo" + ";" + dbSNP_AC.getID(), true});
        cases.add(new Object[]{callIDAC, Arrays.asList(dbSNP_AT), "foo", false});
        cases.add(new Object[]{callExistingIDAC, Arrays.asList(dbSNP_AC), "rsID1", true});
        cases.add(new Object[]{callExistingIDAC, Arrays.asList(dbSNP_AT), "rsID1", false});

        // Multiallelic call vs. single dbSNP record.
        final VariantContext callNoIDACT = makeVC("call", VCFConstants.EMPTY_ID_FIELD, Arrays.asList("A", "C", "T"));
        cases.add(new Object[]{callNoIDACT, Arrays.asList(dbSNP_AC), dbSNP_AC.getID(), true});
        cases.add(new Object[]{callNoIDACT, Arrays.asList(dbSNP_AT), dbSNP_AT.getID(), true});
        cases.add(new Object[]{callNoIDACT, Arrays.asList(dbSNP_AG), VCFConstants.EMPTY_ID_FIELD, false});
        cases.add(new Object[]{callNoIDACT, Arrays.asList(dbSNP_AC_AT), dbSNP_AC_AT.getID(), true});
        cases.add(new Object[]{callNoIDACT, Arrays.asList(dbSNP_AC_AG), dbSNP_AC_AG.getID(), true});

        // multiple options
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AC, dbSNP_AT), "rsID1", true});
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AT, dbSNP_AC), "rsID1", true});
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AC_AT), "rsID1;rsID2", true});
        cases.add(new Object[]{callNoIDAT, Arrays.asList(dbSNP_AC_AT), "rsID1;rsID2", true});
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AC_AG), "rsID1;rsID3", true});
        cases.add(new Object[]{callNoIDAT, Arrays.asList(dbSNP_AC_AG), VCFConstants.EMPTY_ID_FIELD, false});

        // Filtered dbSNP records must not contribute an ID.
        final VariantContext dbSNP_AC_FAIL = new VariantContextBuilder(makeVC("DBSNP", "rsID1", Arrays.asList("A", "C"))).filter("FAIL").make();
        cases.add(new Object[]{callNoIDAC, Arrays.asList(dbSNP_AC_FAIL), VCFConstants.EMPTY_ID_FIELD, false});

        return cases.toArray(new Object[][]{});
    }

    @Test(dataProvider = "AnnotateRsIDData")
    public void testAnnotateRsID(final VariantContext toAnnotate, final List<VariantContext> dbSNPRecords, final String expectedID, final boolean expectOverlap) throws Exception {
        final VariantOverlapAnnotator annotator = makeAnnotator("dbnsp");
        final VariantContext annotated = annotator.annotateRsID(dbSNPRecords, toAnnotate);
        Assert.assertNotNull(annotated);
        Assert.assertEquals(annotated.getID(), expectedID);
    }

    @Test(dataProvider = "AnnotateRsIDData")
    public void testAnnotateOverlaps(final VariantContext toAnnotate, final List<VariantContext> records, final String expectedID, final boolean expectOverlap) throws Exception {
        final String name = "binding";
        final VariantOverlapAnnotator annotator = makeAnnotator(null, name);
        final VariantContext annotated = annotator.annotateOverlap(records, name, toAnnotate);
        Assert.assertNotNull(annotated);
        Assert.assertEquals(annotated.getID(), toAnnotate.getID(), "Shouldn't modify annotation");
        Assert.assertEquals(annotated.hasAttribute(name), expectOverlap);
        if ( expectOverlap ) {
            Assert.assertEquals(annotated.getAttribute(name), true);
        }
    }
}

View File

@ -62,7 +62,7 @@ public class BeagleIntegrationTest extends WalkerTest {
"--beagleR2:BEAGLE " + beagleValidationDataLocation + "inttestbgl.r2 " +
"--beagleProbs:BEAGLE " + beagleValidationDataLocation + "inttestbgl.gprobs " +
"--beaglePhased:BEAGLE " + beagleValidationDataLocation + "inttestbgl.phased " +
"-o %s --no_cmdline_in_header -U LENIENT_VCF_PROCESSING", 1, Arrays.asList("c5522304abf0633041c7772dd7dafcea"));
"-o %s --no_cmdline_in_header -U LENIENT_VCF_PROCESSING", 1, Arrays.asList("989449fa3e262b88ba126867fa3ad9fb"));
spec.disableShadowBCF();
executeTest("test BeagleOutputToVCF", spec);
}
@ -96,7 +96,7 @@ public class BeagleIntegrationTest extends WalkerTest {
"--beagleR2:beagle /humgen/gsa-hpprojects/GATK/data/Validation_Data/EUR_beagle_in_test.r2 "+
"--beagleProbs:beagle /humgen/gsa-hpprojects/GATK/data/Validation_Data/EUR_beagle_in_test.gprobs.bgl "+
"--beaglePhased:beagle /humgen/gsa-hpprojects/GATK/data/Validation_Data/EUR_beagle_in_test.phased.bgl "+
"-L 20:1-70000 -o %s --no_cmdline_in_header -U LENIENT_VCF_PROCESSING",1,Arrays.asList("d8906b67c7f9fdb5b37b8e9e050982d3"));
"-L 20:1-70000 -o %s --no_cmdline_in_header -U LENIENT_VCF_PROCESSING",1,Arrays.asList("e036636fcd6a748ede4a70ea47941d47"));
spec.disableShadowBCF();
executeTest("testBeagleChangesSitesToRef",spec);
}

View File

@ -0,0 +1,362 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.bqsr;
import org.broadinstitute.sting.WalkerTest;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.*;
import static org.testng.Assert.assertTrue;
/**
 * Tests AnalyzeCovariates.
 * <p/>
 * Note that since the PDF reports generated by R differ every time this program
 * is executed, their content is not tested; the tests only verify that the file
 * has a healthy size.
 */
public class AnalyzeCovariatesIntegrationTest extends WalkerTest {

    /** Walker name under test, derived from the class itself so it cannot drift from a typo. */
    private static final String TOOL_NAME = AnalyzeCovariates.class.getSimpleName();

    /**
     * Directory where the testdata is located.
     */
    private static final File TEST_DATA_DIR = new File(privateTestDir, "AnalyzeCovariates");

    /**
     * File containing the before report for normal testing.
     */
    private static final File BEFORE_FILE = new File(TEST_DATA_DIR, "before.table");

    /**
     * File containing the after report for normal testing.
     */
    private static final File AFTER_FILE = new File(TEST_DATA_DIR, "after.table");

    /**
     * File containing the bqsr report for normal testing.
     */
    private static final File BQSR_FILE = new File(TEST_DATA_DIR, "bqsr.table");

    /**
     * Test the content of the generated csv file.
     *
     * @throws IOException should never happen. It would be an indicator of a
     * problem with the testing environment.
     */
    @Test(enabled = true)
    public void testCsvGeneration() throws IOException {
        final WalkerTestSpec spec = new WalkerTestSpec(
                buildCommandLine("%s", null, true, true, true),
                Collections.singletonList("106709d32e6f0a0a9dd6a6340ec246ab"));
        executeTest("testCsvGeneration", spec);
    }

    /**
     * Test the size of the generated pdf.
     * <p/>
     * Unfortunately we cannot test the content as it changes slightly
     * every time the tool is run, so we only check that the output
     * exists and has a healthy size.
     *
     * @throws IOException should never happen. It would be an
     * indicator of a problem with the testing environment.
     */
    @Test(enabled = true)
    public void testPdfGeneration() throws IOException {
        final File pdfFile = File.createTempFile("ACTest", ".pdf");
        // Delete the placeholder so the tool itself must create the pdf; the return
        // value is deliberately ignored because a failure here would surface below
        // as a "file was not created" assertion anyway.
        pdfFile.delete();
        pdfFile.deleteOnExit();
        final List<String> md5 = Collections.emptyList();
        final WalkerTestSpec spec = new WalkerTestSpec(
                buildCommandLine(null, pdfFile.toString(), true, true, true), md5);
        executeTest("testPdfGeneration", spec);
        assertTrue(pdfFile.exists(), "the pdf file was not created");
        assertTrue(pdfFile.length() > 260000, "the pdf file size does"
                + " not reach the minimum of 260Kb");
    }

    /**
     * Test the effect of changing some recalibration parameters: each alternative
     * after-report is expected to trigger an incompatible-parameters exception.
     *
     * @param afterFileName name of the alternative after recalibration file.
     * @param description describes what has been changed.
     * @throws IOException should never happen. It would be an
     * indicator of a problem with the testing environment.
     */
    @Test(enabled = true, dataProvider = "alternativeAfterFileProvider")
    public void testParameterChangeException(final String afterFileName,
                                             final String description) throws IOException {
        final File afterFile = new File(TEST_DATA_DIR, afterFileName);
        final WalkerTestSpec spec = new WalkerTestSpec(
                buildCommandLine(null, "%s", true, true, afterFile),
                1, UserException.IncompatibleRecalibrationTableParameters.class);
        executeTest("testParameterChangeException - " + description, spec);
    }

    /**
     * Test combinations of input and output inclusion exclusion of the command
     * line that cause an exception to be thrown.
     *
     * @param useCsvFile whether to include the output csv file.
     * @param usePdfFile whether to include the output pdf file.
     * @param useBQSRFile whether to include the -BQSR input file.
     * @param useBeforeFile whether to include the -before input file.
     * @param useAfterFile whether to include the -after input file.
     * @throws IOException never thrown, unless there is a problem with the testing environment.
     */
    @Test(enabled = true, dataProvider = "alternativeInOutAbsenceCombinations")
    public void testInOutAbsenceException(final boolean useCsvFile, final boolean usePdfFile,
                                          final boolean useBQSRFile, final boolean useBeforeFile,
                                          final boolean useAfterFile) throws IOException {
        final WalkerTestSpec spec = new WalkerTestSpec(
                buildCommandLine(useCsvFile, usePdfFile, useBQSRFile, useBeforeFile, useAfterFile),
                0, UserException.class);
        executeTest("testInOutAbsencePresenceException", spec);
    }

    /**
     * Test combinations of input and output inclusion exclusion of the
     * command line that won't cause an exception.
     *
     * @param useCsvFile whether to include the output csv file.
     * @param usePdfFile whether to include the output pdf file.
     * @param useBQSRFile whether to include the -BQSR input file.
     * @param useBeforeFile whether to include the -before input file.
     * @param useAfterFile whether to include the -after input file.
     * @throws IOException never thrown, unless there is a problem with the testing environment.
     */
    @Test(enabled = true, dataProvider = "alternativeInOutAbsenceCombinations")
    public void testInOutAbsence(final boolean useCsvFile, final boolean usePdfFile,
                                 final boolean useBQSRFile, final boolean useBeforeFile,
                                 final boolean useAfterFile) throws IOException {
        final List<String> md5 = Collections.emptyList();
        final WalkerTestSpec spec = new WalkerTestSpec(
                buildCommandLine(useCsvFile, usePdfFile, useBQSRFile, useBeforeFile, useAfterFile),
                md5);
        executeTest("testInOutAbsencePresence", spec);
    }

    /**
     * Provide in/out file presence combinations to relevant tests.
     * <p/>
     * Methods whose name ends in "Exception" receive the invalid combinations
     * (no output requested, or no input provided); others get valid ones.
     *
     * @param m target test method.
     * @return never <code>null</code>.
     */
    @DataProvider
    public Iterator<Object[]> alternativeInOutAbsenceCombinations(final Method m) {
        final List<Object[]> result = new LinkedList<Object[]>();
        if (m.getName().endsWith("Exception")) {
            result.add(new Object[] { false, false, true, true, true });
            result.add(new Object[] { true, true, false, false, false });
        } else {
            result.add(new Object[] { true, true, true, false, false });
            result.add(new Object[] { true, true, false, true, false });
            result.add(new Object[] { true, true, false, false, true });
            result.add(new Object[] { true, false, false, true, false });
            result.add(new Object[] { false, true, true, false, false });
        }
        return result.iterator();
    }

    /**
     * Provide recalibration parameter change data to relevant tests.
     * @param m target test method.
     * @return never <code>null</code>.
     */
    @DataProvider
    public Iterator<Object[]> alternativeAfterFileProvider(final Method m) {
        final boolean expectsException = m.getName().endsWith("Exception");
        final List<Object[]> result = new LinkedList<Object[]>();
        for (final Object[] data : DIFFERENT_PARAMETERS_AFTER_FILES) {
            if (data[1].equals(expectsException)) {
                result.add(new Object[] { data[0], data[2] });
            }
        }
        return result.iterator();
    }

    /**
     * Triplets &lt; after-grp-file, whether it should fail, what is different &gt;.
     * Constant shared data, hence static.
     */
    private static final Object[][] DIFFERENT_PARAMETERS_AFTER_FILES = {
            {"after-cov.table", true, "Adds additional covariate: repeat-length" },
            {"after-dpSOLID.table", true, "Change the default platform to SOLID" },
            {"after-noDp.table", true, "Unset the default platform" },
            {"after-mcs4.table", true, "Changed -mcs parameter from 2 to 4" }
    };

    /**
     * Build the AC command line given what combinations of input and output files should be included.
     *
     * @param useCsvFile whether to include the output csv file.
     * @param usePdfFile whether to include the output pdf file.
     * @param useBQSRFile whether to include the -BQSR input file.
     * @param useBeforeFile whether to include the -before input file.
     * @param useAfterFile whether to include the -after input file.
     * @return never <code>null</code>.
     * @throws IOException never thrown, unless there is a problem with the testing environment.
     */
    private String buildCommandLine(final boolean useCsvFile, final boolean usePdfFile,
                                    final boolean useBQSRFile, final boolean useBeforeFile,
                                    final boolean useAfterFile) throws IOException {
        final File csvFile = useCsvFile ? File.createTempFile("ACTest", ".csv") : null;
        final File pdfFile = usePdfFile ? File.createTempFile("ACTest", ".pdf") : null;
        if (csvFile != null) {
            csvFile.deleteOnExit();
        }
        if (pdfFile != null) {
            pdfFile.deleteOnExit();
        }
        return buildCommandLine(csvFile == null ? null : csvFile.toString(),
                pdfFile == null ? null : pdfFile.toString(),
                useBQSRFile, useBeforeFile, useAfterFile);
    }

    /**
     * Build the AC command line given the output file names explicitly and what test input files to use.
     * <p/>
     *
     * @param csvFileName the csv output file, <code>null</code> if none should be provided.
     * @param pdfFileName the plots output file, <code>null</code> if none should be provided.
     * @param useBQSRFile whether to include the -BQSR input file.
     * @param useBeforeFile whether to include the -before input file.
     * @param useAfterFile whether to include the -after input file.
     *
     * @return never <code>null</code>.
     */
    private String buildCommandLine(final String csvFileName, final String pdfFileName,
                                    final boolean useBQSRFile, final boolean useBeforeFile,
                                    final boolean useAfterFile) {
        return buildCommandLine(csvFileName, pdfFileName,
                useBQSRFile ? BQSR_FILE : null,
                useBeforeFile ? BEFORE_FILE : null,
                useAfterFile ? AFTER_FILE : null);
    }

    /**
     * Build the AC command line given the output file names and the after file name explicitly and what other
     * test input files to use.
     * <p/>
     *
     * @param csvFileName the csv output file, <code>null</code> if none should be provided.
     * @param pdfFileName the plots output file, <code>null</code> if none should be provided.
     * @param useBQSRFile whether to include the -BQSR input file.
     * @param useBeforeFile whether to include the -before input file.
     * @param afterFile the after input report file, <code>null</code> if none should be provided.
     *
     * @return never <code>null</code>.
     */
    private String buildCommandLine(final String csvFileName, final String pdfFileName,
                                    final boolean useBQSRFile, final boolean useBeforeFile,
                                    final File afterFile) {
        return buildCommandLine(csvFileName, pdfFileName,
                useBQSRFile ? BQSR_FILE : null,
                useBeforeFile ? BEFORE_FILE : null,
                afterFile);
    }

    /**
     * Build the AC command line given the output file names and all input report files explicitly.
     * <p/>
     *
     * @param csvFileName the csv output file, <code>null</code> if none should be provided.
     * @param pdfFileName the plots output file, <code>null</code> if none should be provided.
     * @param bqsrFile the BQSR input report file, <code>null</code> if none should be provided.
     * @param beforeFile the before input report file, <code>null</code> if none should be provided.
     * @param afterFile the after input report file, <code>null</code> if none should be provided.
     *
     * @return never <code>null</code>.
     */
    private String buildCommandLine(final String csvFileName, final String pdfFileName,
                                    final File bqsrFile, final File beforeFile, final File afterFile) {
        final List<String> args = new LinkedList<String>();
        args.add("-T");
        args.add(TOOL_NAME);
        args.add("-R");
        args.add(hg19Reference);
        args.add("-ignoreLMT");
        if (csvFileName != null) {
            args.add("-" + AnalyzeCovariates.CSV_ARG_SHORT_NAME);
            args.add("'" + csvFileName + "'");
        }
        if (pdfFileName != null) {
            args.add("-" + AnalyzeCovariates.PDF_ARG_SHORT_NAME);
            args.add("'" + pdfFileName + "'");
        }
        if (bqsrFile != null) {
            args.add("-BQSR");
            // getAbsolutePath() already returns a String; no toString() needed.
            args.add("'" + bqsrFile.getAbsolutePath() + "'");
        }
        if (beforeFile != null) {
            args.add("-" + AnalyzeCovariates.BEFORE_ARG_SHORT_NAME);
            args.add("'" + beforeFile.getAbsolutePath() + "'");
        }
        if (afterFile != null) {
            args.add("-" + AnalyzeCovariates.AFTER_ARG_SHORT_NAME);
            args.add("'" + afterFile.getAbsolutePath() + "'");
        }
        return Utils.join(" ", args);
    }
}

View File

@ -100,23 +100,23 @@ public class BQSRIntegrationTest extends WalkerTest {
@DataProvider(name = "BQSRTest")
public Object[][] createBQSRTestData() {
return new Object[][]{
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, "", "61fd466b5e94d2d67e116f6f67c9f939")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --no_standard_covs -cov ContextCovariate", "e08b5bcdb64f4beea03730e5631a14ca")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --no_standard_covs -cov CycleCovariate", "448a45dc154c95d1387cb5cdddb67071")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --indels_context_size 4", "c1e7999e445d51bbe2e775dac5325643")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --low_quality_tail 5", "a57c16918cdfe12d55a89c21bf195279")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --quantizing_levels 6", "836dccacf48ccda6b2843d07e8f1ef4d")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --mismatches_context_size 4", "0fb2aedc2f8d66b5821cb570f15a8c4d")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", "", "c9953f020a65c1603a6d71aeeb1b95f3")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA19240.chr1.BFAST.SOLID.bam", "1:10,000,000-10,200,000", "", "85a120b7d86b61597b86b9e93decbdfc")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12873.454.SRP000031.2009_06.chr1.10_20mb.1RG.bam", "1:10,000,000-10,200,000", "", "5248dc49aec0323c74b496bb4928c73c")},
{new BQSRTest(b36KGReference, validationDataLocation + "originalQuals.1kg.chr1.1-1K.1RG.bam", "1:1-1,000", " -OQ", "cb52f267e0010f849f50b0bf1de474a1")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA19240.chr1.BFAST.SOLID.bam", "1:10,000,000-20,000,000", " --solid_recal_mode REMOVE_REF_BIAS", "1425a5063ee757dbfc013df24e65a67a")},
{new BQSRTest(b36KGReference, privateTestDir + "NA19240.chr1.BFAST.SOLID.hasCSNoCall.bam", "1:50,000-80,000", " --solid_nocall_strategy LEAVE_READ_UNRECALIBRATED", "c1c3cda8caceed619d3d439c3990cd26")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", " -knownSites:anyNameABCD,VCF " + privateTestDir + "vcfexample3.vcf", "c9953f020a65c1603a6d71aeeb1b95f3")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", " -knownSites:bed " + validationDataLocation + "bqsrKnownTest.bed", "5bfff0c699345cca12a9b33acf95588f")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, "", "f805a0020eea987b79f314fa99913806")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --no_standard_covs -cov ContextCovariate", "86075d3856eb06816a0dd81af55e421f")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --no_standard_covs -cov CycleCovariate", "155802237e1fc7a001398b8f4bcf4b72")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --indels_context_size 4", "38c7916cc019fe8d134df67639422b42")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --low_quality_tail 5", "b74e75f3c5aa90bd21af1e20f2ac8c40")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --quantizing_levels 6", "e564505aea11464de8ed72890d9ea89a")},
{new BQSRTest(hg18Reference, HiSeqBam, HiSeqInterval, " --mismatches_context_size 4", "380d8be121ffaddd3461ee0ac3d1a76f")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", "", "0b5a8e259e997e4c7b5836d4c28e6f4d")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA19240.chr1.BFAST.SOLID.bam", "1:10,000,000-10,200,000", "", "281682124584ab384f23359934df0c3b")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12873.454.SRP000031.2009_06.chr1.10_20mb.1RG.bam", "1:10,000,000-10,200,000", "", "0a92fdff5fd26227c29d34eda5a32f49")},
{new BQSRTest(b36KGReference, validationDataLocation + "originalQuals.1kg.chr1.1-1K.1RG.bam", "1:1-1,000", " -OQ", "90d8c24077e8ae9a0037a9aad5f09e31")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA19240.chr1.BFAST.SOLID.bam", "1:10,000,000-20,000,000", " --solid_recal_mode REMOVE_REF_BIAS", "c41ef02c640ef1fed4bfc03b9b33b616")},
{new BQSRTest(b36KGReference, privateTestDir + "NA19240.chr1.BFAST.SOLID.hasCSNoCall.bam", "1:50,000-80,000", " --solid_nocall_strategy LEAVE_READ_UNRECALIBRATED", "b577cd1d529425f66db49620db09fdca")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", " -knownSites:anyNameABCD,VCF " + privateTestDir + "vcfexample3.vcf", "0b5a8e259e997e4c7b5836d4c28e6f4d")},
{new BQSRTest(b36KGReference, validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.1Mb.1RG.bam", "1:10,000,000-10,200,000", " -knownSites:bed " + validationDataLocation + "bqsrKnownTest.bed", "9ad49269c0156f8ab1173261bf23e600")},
// make sure we work with ION torrent bam
{new BQSRTest(b37KGReference, privateTestDir + "iontorrent.bam", "20:10,000,000-10,200,000", "", "7375c7b692e76b651c278a9fb478fa1c")},
{new BQSRTest(b37KGReference, privateTestDir + "iontorrent.bam", "20:10,000,000-10,200,000", "", "04bfa4760767022e7f5252e6e4432cc1")},
};
}
@ -141,22 +141,6 @@ public class BQSRIntegrationTest extends WalkerTest {
executeTest("testBQSRFailWithoutDBSNP", spec);
}
@Test
public void testBQSRCSV() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
" -T BaseRecalibrator" +
" -R " + b36KGReference +
" -I " + validationDataLocation + "NA12892.SLX.SRP000031.2009_06.selected.bam" +
" -knownSites " + b36dbSNP129 +
" -L 1:10,000,000-10,200,000" +
" -o /dev/null" +
" -sortAllCols" +
" --plot_pdf_file /dev/null" +
" --intermediate_csv_file %s",
Arrays.asList("90ad19143024684e3c4410dc8fd2bd9d"));
executeTest("testBQSR-CSVfile", spec);
}
@Test
public void testBQSRFailWithSolidNoCall() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(

View File

@ -53,6 +53,7 @@ import org.testng.annotations.Test;
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class ReduceReadsIntegrationTest extends WalkerTest {
@ -221,13 +222,13 @@ public class ReduceReadsIntegrationTest extends WalkerTest {
@Test(enabled = true)
public void testCoReduction() {
String base = String.format("-T ReduceReads %s -npt -R %s -I %s -I %s", COREDUCTION_L, REF, COREDUCTION_BAM_A, COREDUCTION_BAM_B) + " -o %s ";
String base = String.format("-T ReduceReads %s --cancer_mode -npt -R %s -I %s -I %s", COREDUCTION_L, REF, COREDUCTION_BAM_A, COREDUCTION_BAM_B) + " -o %s ";
executeTest("testCoReduction", new WalkerTestSpec(base, Arrays.asList("bam"), Arrays.asList("5f4d2c1d9c010dfd6865aeba7d0336fe")), COREDUCTION_QUALS_TEST_MD5);
}
@Test(enabled = true)
public void testCoReductionWithKnowns() {
String base = String.format("-T ReduceReads %s -npt -R %s -I %s -I %s -known %s", COREDUCTION_L, REF, COREDUCTION_BAM_A, COREDUCTION_BAM_B, DBSNP) + " -o %s ";
String base = String.format("-T ReduceReads %s --cancer_mode -npt -R %s -I %s -I %s -known %s", COREDUCTION_L, REF, COREDUCTION_BAM_A, COREDUCTION_BAM_B, DBSNP) + " -o %s ";
executeTest("testCoReductionWithKnowns", new WalkerTestSpec(base, Arrays.asList("bam"), Arrays.asList("ca48dd972bf57595c691972c0f887cb4")), COREDUCTION_QUALS_TEST_MD5);
}
@ -259,7 +260,7 @@ public class ReduceReadsIntegrationTest extends WalkerTest {
public void testDivideByZero() {
String base = String.format("-T ReduceReads %s -npt -R %s -I %s", DIVIDEBYZERO_L, REF, DIVIDEBYZERO_BAM) + " -o %s ";
// we expect to lose coverage due to the downsampling so don't run the systematic tests
executeTestWithoutAdditionalRRTests("testDivideByZero", new WalkerTestSpec(base, Arrays.asList("bam"), Arrays.asList("1663f35802f82333c5e15653e437ce2d")));
executeTestWithoutAdditionalRRTests("testDivideByZero", new WalkerTestSpec(base, Arrays.asList("bam"), Arrays.asList("4f0ef477c0417d1eb602b323474ef377")));
}
/**
@ -281,5 +282,24 @@ public class ReduceReadsIntegrationTest extends WalkerTest {
" -o %s --downsample_coverage 250 -dcov 50 ";
executeTest("testPairedReadsInVariantRegion", new WalkerTestSpec(base, Arrays.asList("bam"), Arrays.asList("7e7b358443827ca239db3b98f299aec6")), "2af063d1bd3c322b03405dbb3ecf59a9");
}
/**
* Confirm that this bam does not fail when multi-sample mode is enabled. The provided example is tricky and used to cause
* us to exception out in the code.
*/
@Test(enabled = true)
public void testMultiSampleDoesNotFailWithFlag() {
String cmd = "-T ReduceReads --cancer_mode -npt -R " + b37KGReference + " -I " + privateTestDir + "rr_multisample.bam -o /dev/null";
executeTestWithoutAdditionalRRTests("testMultiSampleDoesNotFailWithFlag", new WalkerTestSpec(cmd, 0, Collections.<String>emptyList()));
}
/**
* Confirm that this bam fails when multi-sample mode is not enabled
*/
@Test(enabled = true)
public void testMultiSampleFailsWithoutFlag() {
String cmd = "-T ReduceReads -npt -R " + b37KGReference + " -I " + privateTestDir + "rr_multisample.bam -o /dev/null";
executeTestWithoutAdditionalRRTests("testMultiSampleDoesNotFailWithFlag", new WalkerTestSpec(cmd, 0, UserException.BadInput.class));
}
}

View File

@ -89,6 +89,25 @@ public class SlidingWindowUnitTest extends BaseTest {
return variantRegionBitset;
}
//////////////////////////////////////////////////////////////////////////////////////
//// Test for leading softclips immediately followed by an insertion in the CIGAR ////
//////////////////////////////////////////////////////////////////////////////////////
@Test(enabled = true)
public void testLeadingClipThenInsertion() {
final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(header, "foo", 0, 1, 10);
read.setReadBases(Utils.dupBytes((byte) 'A', 10));
read.setBaseQualities(Utils.dupBytes((byte)30, 10));
read.setMappingQuality(30);
read.setCigarString("2S2I6M");
final SlidingWindow slidingWindow = new SlidingWindow("1", 0, 1);
slidingWindow.addRead(read);
Pair<ObjectSet<GATKSAMRecord>, CompressionStash> result = slidingWindow.close(null);
}
//////////////////////////////////////////////////////////////////////////////////////
//// This section tests the findVariantRegions() method and related functionality ////
//////////////////////////////////////////////////////////////////////////////////////

View File

@ -57,7 +57,7 @@ public class ErrorRatePerCycleIntegrationTest extends WalkerTest {
WalkerTestSpec spec = new WalkerTestSpec(
"-T ErrorRatePerCycle -R " + b37KGReference + " -I " + b37GoodBAM + " -L 20:10,000,000-10,100,000 -o %s",
1,
Arrays.asList("dccdf3cb3193d01a1a767097e4a5c35e"));
Arrays.asList("6191340f0b56ee81fb248c8f5c913a8e"));
executeTest("ErrorRatePerCycle:", spec);
}
}

View File

@ -66,11 +66,11 @@ public class DiagnoseTargetsIntegrationTest extends WalkerTest {
@Test(enabled = true)
public void testSingleSample() {
DTTest("testSingleSample ", "-I " + singleSample + " -max 75", "850304909477afa8c2a8f128d6eedde9");
DTTest("testSingleSample ", "-I " + singleSample + " -max 75", "1771e95aed2b3b240dc353f84e19847d");
}
@Test(enabled = true)
public void testMultiSample() {
DTTest("testMultiSample ", "-I " + multiSample, "bedd19bcf21d1a779f6706c0351c9d26");
DTTest("testMultiSample ", "-I " + multiSample, "c7f1691dbe5f121c4a79be823d3057e5");
}
}

View File

@ -69,16 +69,16 @@ public class UnifiedGenotyperGeneralPloidySuite1IntegrationTest extends WalkerTe
@Test(enabled = true)
public void testBOTH_GGA_Pools() {
executor.PC_LSV_Test(String.format(" -maxAltAlleles 2 -ploidy 24 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s", LSV_ALLELES), "LSV_BOTH_GGA", "BOTH", "71f16e19b7d52e8edee46f4121e59f54");
executor.PC_LSV_Test(String.format(" -maxAltAlleles 2 -ploidy 24 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s", LSV_ALLELES), "LSV_BOTH_GGA", "BOTH", "dac2d7969e109aee9ad2dad573759f58");
}
@Test(enabled = true)
public void testINDEL_GGA_Pools() {
executor.PC_LSV_Test(String.format(" -maxAltAlleles 1 -ploidy 24 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s", LSV_ALLELES), "LSV_INDEL_GGA", "INDEL", "3f7d763c654f1d708323f369ea4a099b");
executor.PC_LSV_Test(String.format(" -maxAltAlleles 1 -ploidy 24 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s", LSV_ALLELES), "LSV_INDEL_GGA", "INDEL", "ceb105e3db0f2b993e3d725b0d60b6a3");
}
@Test(enabled = true)
public void testINDEL_maxAltAlleles2_ploidy1_Pools_noRef() {
executor.PC_LSV_Test_NoRef(" -maxAltAlleles 2 -ploidy 1", "LSV_INDEL_DISC_NOREF_p1", "INDEL", "603416111f34e2a735163fa97e1a8272");
executor.PC_LSV_Test_NoRef(" -maxAltAlleles 2 -ploidy 1", "LSV_INDEL_DISC_NOREF_p1", "INDEL", "98f4d78aad745c6e853b81b2e4e207b4");
}
}

View File

@ -58,16 +58,16 @@ public class UnifiedGenotyperGeneralPloidySuite2IntegrationTest extends WalkerTe
@Test(enabled = true)
public void testINDEL_maxAltAlleles2_ploidy3_Pools_noRef() {
executor.PC_LSV_Test_NoRef(" -maxAltAlleles 2 -ploidy 3","LSV_INDEL_DISC_NOREF_p3","INDEL","13de8558acaa0b9082f2df477b45de9b");
executor.PC_LSV_Test_NoRef(" -maxAltAlleles 2 -ploidy 3","LSV_INDEL_DISC_NOREF_p3","INDEL","25902d7a6a0c00c60c2d5845dfaa1a4c");
}
@Test(enabled = true)
public void testMT_SNP_DISCOVERY_sp4() {
executor.PC_MT_Test(CEUTRIO_BAM, " -maxAltAlleles 1 -ploidy 8", "MT_SNP_DISCOVERY_sp4","3fc6f4d458313616727c60e49c0e852b");
executor.PC_MT_Test(CEUTRIO_BAM, " -maxAltAlleles 1 -ploidy 8", "MT_SNP_DISCOVERY_sp4","5d55b71688a0777a7c0247c376401368");
}
@Test(enabled = true)
public void testMT_SNP_GGA_sp10() {
executor.PC_MT_Test(CEUTRIO_BAM, String.format(" -maxAltAlleles 1 -ploidy 20 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s",NA12891_CALLS), "MT_SNP_GGA_sp10", "1bebbc0f28bff6fd64736ccca8839df8");
executor.PC_MT_Test(CEUTRIO_BAM, String.format(" -maxAltAlleles 1 -ploidy 20 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles %s",NA12891_CALLS), "MT_SNP_GGA_sp10", "cf336d66a109c55f90e9ed2b3bc196c8");
}
}

View File

@ -73,7 +73,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
" -o %s" +
" -L 1:10,000,000-10,500,000",
1,
Arrays.asList("d8b0c5be39ec6b239641c2f2646d2bc3"));
Arrays.asList("ef8151aa699da3272c1ae0986d16ca21"));
executeTest(String.format("test indel caller in SLX"), spec);
}
@ -88,7 +88,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
" -minIndelCnt 1" +
" -L 1:10,000,000-10,100,000",
1,
Arrays.asList("d9572a227ccb13a6baa6dc4fb65bc1e5"));
Arrays.asList("7f88229ccefb74513efb199b61183cb8"));
executeTest(String.format("test indel caller in SLX with low min allele count"), spec);
}
@ -101,7 +101,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
" -o %s" +
" -L 1:10,000,000-10,500,000",
1,
Arrays.asList("8d9b8f8a1479322961c840e461b6dba8"));
Arrays.asList("1928ad48bcd0ca180e046bc235cfb3f4"));
executeTest(String.format("test indel calling, multiple technologies"), spec);
}
@ -111,7 +111,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
baseCommandIndels + " --genotyping_mode GENOTYPE_GIVEN_ALLELES -alleles " + privateTestDir + "indelAllelesForUG.vcf -I " + validationDataLocation +
"pilot2_daughters.chr20.10k-11k.bam -o %s -L 20:10,000,000-10,100,000", 1,
Arrays.asList("16d975480ff1e689113171805b916b62"));
Arrays.asList("6663e434a7b549f23bfd52db90e53a1a"));
executeTest("test MultiSample Pilot2 indels with alleles passed in", spec);
}
@ -121,7 +121,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
baseCommandIndels + " --output_mode EMIT_ALL_SITES --genotyping_mode GENOTYPE_GIVEN_ALLELES -alleles "
+ privateTestDir + "indelAllelesForUG.vcf -I " + validationDataLocation +
"pilot2_daughters.chr20.10k-11k.bam -o %s -L 20:10,000,000-10,100,000", 1,
Arrays.asList("60ed3f8d5bc3f765e6ce3fa698b68bb7"));
Arrays.asList("581c552664e536df6d0f102fb0d10e5a"));
executeTest("test MultiSample Pilot2 indels with alleles passed in and emitting all sites", spec);
}
@ -136,7 +136,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
WalkerTest.WalkerTestSpec spec2 = new WalkerTest.WalkerTestSpec(
baseCommandIndels + " --genotyping_mode GENOTYPE_GIVEN_ALLELES -alleles " + result.get(0).getAbsolutePath() + " -I " + validationDataLocation +
"low_coverage_CEU.chr1.10k-11k.bam -o %s -L " + result.get(0).getAbsolutePath(), 1,
Arrays.asList("3d4d66cc253eac55f16e5b0a36f17d8d"));
Arrays.asList("5596851d19582dd1af3901b7d703ae0a"));
executeTest("test MultiSample Pilot1 CEU indels using GENOTYPE_GIVEN_ALLELES", spec2);
}
@ -176,7 +176,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
public void testMinIndelFraction0() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
assessMinIndelFraction + " -minIndelFrac 0.0", 1,
Arrays.asList("264325878b988acc11d8e5d9d2ba0b7f"));
Arrays.asList("862d82c8aa35f1da4f9e67b5b48dfe52"));
executeTest("test minIndelFraction 0.0", spec);
}
@ -184,7 +184,7 @@ public class UnifiedGenotyperIndelCallingIntegrationTest extends WalkerTest {
public void testMinIndelFraction25() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
assessMinIndelFraction + " -minIndelFrac 0.25", 1,
Arrays.asList("98abcfb0a008050eba8b9c285a25b2a0"));
Arrays.asList("8d9fc96be07db791737ac18135de4d63"));
executeTest("test minIndelFraction 0.25", spec);
}

View File

@ -46,11 +46,15 @@
package org.broadinstitute.sting.gatk.walkers.genotyper;
import net.sf.samtools.util.BlockCompressedInputStream;
import org.broad.tribble.readers.AsciiLineReader;
import org.broadinstitute.sting.WalkerTest;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.io.File;
import java.util.Arrays;
import java.util.Collections;
@ -156,6 +160,14 @@ public class UnifiedGenotyperIntegrationTest extends WalkerTest {
}
@Test
public void emitPLsAtAllSites() {
WalkerTest.WalkerTestSpec spec1 = new WalkerTest.WalkerTestSpec(
baseCommand + " -I " + validationDataLocation + "NA12878.1kg.p2.chr1_10mb_11_mb.SLX.bam -o %s -L 1:10,000,000-10,010,000 --output_mode EMIT_ALL_SITES -allSitePLs", 1,
Arrays.asList("7cc55db8693759e059a05bc4398f6f69"));
executeTest("test all site PLs 1", spec1);
}
// --------------------------------------------------------------------------------------------------------------
//
// testing heterozygosity
@ -288,9 +300,24 @@ public class UnifiedGenotyperIntegrationTest extends WalkerTest {
@Test
public void testNsInCigar() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
final WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T UnifiedGenotyper --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "testWithNs.bam -o %s -L 8:141813600-141813700 -out_mode EMIT_ALL_SITES", 1,
Arrays.asList("2ae3fd39c53a6954d32faed8703adfe8"));
UserException.UnsupportedCigarOperatorException.class);
executeTest("test calling on reads with Ns in CIGAR", spec);
}
@Test(enabled = true)
public void testCompressedVCFOutputWithNT() throws Exception {
WalkerTestSpec spec = new WalkerTestSpec("-T UnifiedGenotyper -R " + b37KGReference + " -I "
+ privateTestDir + "PCRFree.2x250.Illumina.20_10_11.bam"
+ " -o %s -L 20:10,000,000-10,100,000 -nt 4",
1, Arrays.asList("vcf.gz"), Arrays.asList(""));
final File vcf = executeTest("testCompressedVCFOutputWithNT", spec).first.get(0);
final AsciiLineReader reader = new AsciiLineReader(new BlockCompressedInputStream(vcf));
int nLines = 0;
while ( reader.readLine() != null )
nLines++;
Assert.assertTrue(nLines > 0);
}
}

View File

@ -64,7 +64,7 @@ public class UnifiedGenotyperNormalCallingIntegrationTest extends WalkerTest{
public void testMultiSamplePilot1() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
baseCommand + " -I " + validationDataLocation + "low_coverage_CEU.chr1.10k-11k.bam -o %s -L 1:10,022,000-10,025,000", 1,
Arrays.asList("a6c224235c21b4af816b1512eb0624df"));
Arrays.asList("a9466c1e3ce1fc4bac83086b25a6df54"));
executeTest("test MultiSample Pilot1", spec);
}
@ -80,7 +80,7 @@ public class UnifiedGenotyperNormalCallingIntegrationTest extends WalkerTest{
public void testWithAllelesPassedIn2() {
WalkerTest.WalkerTestSpec spec2 = new WalkerTest.WalkerTestSpec(
baseCommand + " --output_mode EMIT_ALL_SITES --genotyping_mode GENOTYPE_GIVEN_ALLELES -alleles " + privateTestDir + "allelesForUG.vcf -I " + validationDataLocation + "pilot2_daughters.chr20.10k-11k.bam -o %s -L 20:10,000,000-10,025,000", 1,
Arrays.asList("698e54aeae3130779d246b9480a4052c"));
Arrays.asList("3e646003c5b93da80c7d8e5d0ff2ee4e"));
executeTest("test MultiSample Pilot2 with alleles passed in and emitting all sites", spec2);
}
@ -96,7 +96,7 @@ public class UnifiedGenotyperNormalCallingIntegrationTest extends WalkerTest{
public void testMultipleSNPAlleles() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T UnifiedGenotyper --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -glm BOTH --dbsnp " + b37dbSNP129 + " -I " + privateTestDir + "multiallelic.snps.bam -o %s -L " + privateTestDir + "multiallelic.snps.intervals", 1,
Arrays.asList("09a1a4d4bf0289bcc5e8a958f783a989"));
Arrays.asList("06c85e8eab08b67244cf38fc785aca22"));
executeTest("test Multiple SNP alleles", spec);
}
@ -112,7 +112,7 @@ public class UnifiedGenotyperNormalCallingIntegrationTest extends WalkerTest{
public void testReverseTrim() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T UnifiedGenotyper --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -glm INDEL -I " + validationDataLocation + "CEUTrio.HiSeq.b37.chr20.10_11mb.bam -o %s -L 20:10289124 -L 20:10090289", 1,
Arrays.asList("57a1bb44967988f2b7ae7779127990ae"));
Arrays.asList("f3da1ff1e49a831af055ca52d6d07dd7"));
executeTest("test reverse trim", spec);
}
@ -120,7 +120,7 @@ public class UnifiedGenotyperNormalCallingIntegrationTest extends WalkerTest{
public void testMismatchedPLs() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T UnifiedGenotyper --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -glm INDEL -I " + privateTestDir + "mismatchedPLs.bam -o %s -L 1:24020341", 1,
Arrays.asList("3011c20165951ca43c8a4e86a5835dbd"));
Arrays.asList("20ff311f363c51b7385a76f6f296759c"));
executeTest("test mismatched PLs", spec);
}
}

View File

@ -63,18 +63,18 @@ public class UnifiedGenotyperReducedReadsIntegrationTest extends WalkerTest {
public void testReducedBam() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T UnifiedGenotyper --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "bamExample.ReducedRead.ADAnnotation.bam -o %s -L 1:67,225,396-67,288,518", 1,
Arrays.asList("e6565060b44a7804935973efcd56e596"));
Arrays.asList("ffde0d5e23523e4bd9e7e18f62d37d0f"));
executeTest("test calling on a ReducedRead BAM", spec);
}
@Test
public void testReducedBamSNPs() {
testReducedCalling("SNP", "ab776d74c41ce2b859e2b2466a76204a");
testReducedCalling("SNP", "e8de8c523751ad2fa2ee20185ba5dea7");
}
@Test
public void testReducedBamINDELs() {
testReducedCalling("INDEL", "9a986b98ed014576ce923e07452447f4");
testReducedCalling("INDEL", "4b4902327fb132f9aaab3dd5ace934e1");
}

View File

@ -52,10 +52,7 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
* Date: 3/27/12
*/
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.SAMFileHeader;
import net.sf.samtools.*;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.DeBruijnGraph;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
@ -76,55 +73,13 @@ public class DeBruijnAssemblerUnitTest extends BaseTest {
public void testReferenceCycleGraph() {
String refCycle = "ATCGAGGAGAGCGCCCCGAGATATATATATATATATTTGCGAGCGCGAGCGTTTTAAAAATTTTAGACGGAGAGATATATATATATGGGAGAGGGGATATATATATATCCCCCC";
String noCycle = "ATCGAGGAGAGCGCCCCGAGATATTATTTGCGAGCGCGAGCGTTTTAAAAATTTTAGACGGAGAGATGGGAGAGGGGATATATAATATCCCCCC";
final DeBruijnGraph g1 = new DeBruijnAssembler().createGraphFromSequences(new ArrayList<GATKSAMRecord>(), 10, new Haplotype(refCycle.getBytes(), true));
final DeBruijnGraph g2 = new DeBruijnAssembler().createGraphFromSequences(new ArrayList<GATKSAMRecord>(), 10, new Haplotype(noCycle.getBytes(), true));
final DeBruijnGraph g1 = new DeBruijnAssembler().createGraphFromSequences(new ArrayList<GATKSAMRecord>(), 10, new Haplotype(refCycle.getBytes(), true), Collections.<Haplotype>emptyList());
final DeBruijnGraph g2 = new DeBruijnAssembler().createGraphFromSequences(new ArrayList<GATKSAMRecord>(), 10, new Haplotype(noCycle.getBytes(), true), Collections.<Haplotype>emptyList());
Assert.assertTrue(g1 == null, "Reference cycle graph should return null during creation.");
Assert.assertTrue(g2 != null, "Reference non-cycle graph should not return null during creation.");
}
@Test(enabled = !DEBUG)
public void testLeftAlignCigarSequentially() {
String preRefString = "GATCGATCGATC";
String postRefString = "TTT";
String refString = "ATCGAGGAGAGCGCCCCG";
String indelString1 = "X";
String indelString2 = "YZ";
int refIndel1 = 10;
int refIndel2 = 12;
for ( final int indelSize1 : Arrays.asList(1, 2, 3, 4) ) {
for ( final int indelOp1 : Arrays.asList(1, -1) ) {
for ( final int indelSize2 : Arrays.asList(1, 2, 3, 4) ) {
for ( final int indelOp2 : Arrays.asList(1, -1) ) {
Cigar expectedCigar = new Cigar();
expectedCigar.add(new CigarElement(refString.length(), CigarOperator.M));
expectedCigar.add(new CigarElement(indelSize1, (indelOp1 > 0 ? CigarOperator.I : CigarOperator.D)));
expectedCigar.add(new CigarElement((indelOp1 < 0 ? refIndel1 - indelSize1 : refIndel1), CigarOperator.M));
expectedCigar.add(new CigarElement(refString.length(), CigarOperator.M));
expectedCigar.add(new CigarElement(indelSize2 * 2, (indelOp2 > 0 ? CigarOperator.I : CigarOperator.D)));
expectedCigar.add(new CigarElement((indelOp2 < 0 ? (refIndel2 - indelSize2) * 2 : refIndel2 * 2), CigarOperator.M));
expectedCigar.add(new CigarElement(refString.length(), CigarOperator.M));
Cigar givenCigar = new Cigar();
givenCigar.add(new CigarElement(refString.length() + refIndel1/2, CigarOperator.M));
givenCigar.add(new CigarElement(indelSize1, (indelOp1 > 0 ? CigarOperator.I : CigarOperator.D)));
givenCigar.add(new CigarElement((indelOp1 < 0 ? (refIndel1/2 - indelSize1) : refIndel1/2) + refString.length() + refIndel2/2 * 2, CigarOperator.M));
givenCigar.add(new CigarElement(indelSize2 * 2, (indelOp2 > 0 ? CigarOperator.I : CigarOperator.D)));
givenCigar.add(new CigarElement((indelOp2 < 0 ? (refIndel2/2 - indelSize2) * 2 : refIndel2/2 * 2) + refString.length(), CigarOperator.M));
String theRef = preRefString + refString + Utils.dupString(indelString1, refIndel1) + refString + Utils.dupString(indelString2, refIndel2) + refString + postRefString;
String theRead = refString + Utils.dupString(indelString1, refIndel1 + indelOp1 * indelSize1) + refString + Utils.dupString(indelString2, refIndel2 + indelOp2 * indelSize2) + refString;
Cigar calculatedCigar = new DeBruijnAssembler().leftAlignCigarSequentially(AlignmentUtils.consolidateCigar(givenCigar), theRef.getBytes(), theRead.getBytes(), preRefString.length(), 0);
Assert.assertEquals(AlignmentUtils.consolidateCigar(calculatedCigar).toString(), AlignmentUtils.consolidateCigar(expectedCigar).toString(), "Cigar strings do not match!");
}
}
}
}
}
private static class MockBuilder extends DeBruijnGraphBuilder {
public final List<Kmer> addedPairs = new LinkedList<Kmer>();
@ -165,7 +120,7 @@ public class DeBruijnAssemblerUnitTest extends BaseTest {
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "AddReadKmersToGraph")
@Test(dataProvider = "AddReadKmersToGraph", enabled = ! DEBUG)
public void testAddReadKmersToGraph(final String bases, final int kmerSize, final List<Integer> badQualsSites) {
final int readLen = bases.length();
final DeBruijnAssembler assembler = new DeBruijnAssembler();
@ -198,4 +153,47 @@ public class DeBruijnAssemblerUnitTest extends BaseTest {
Assert.assertTrue(expectedBases.contains(new String(addedKmer.bases())), "Couldn't find kmer " + addedKmer + " among all expected kmers " + expectedBases);
}
}
@DataProvider(name = "AddGGAKmersToGraph")
public Object[][] makeAddGGAKmersToGraphData() {
List<Object[]> tests = new ArrayList<Object[]>();
// this functionality can be adapted to provide input data for whatever you might want in your data
final String bases = "ACGTAACCGGTTAAACCCGGGTTT";
final int readLen = bases.length();
final List<Integer> allBadStarts = new ArrayList<Integer>(readLen);
for ( int i = 0; i < readLen; i++ ) allBadStarts.add(i);
for ( final int kmerSize : Arrays.asList(3, 4, 5) ) {
tests.add(new Object[]{bases, kmerSize});
}
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "AddGGAKmersToGraph", enabled = ! DEBUG)
public void testAddGGAKmersToGraph(final String bases, final int kmerSize) {
final int readLen = bases.length();
final DeBruijnAssembler assembler = new DeBruijnAssembler();
final MockBuilder builder = new MockBuilder(kmerSize);
final Set<String> expectedBases = new HashSet<String>();
final Set<Integer> expectedStarts = new LinkedHashSet<Integer>();
for ( int i = 0; i < readLen; i++) {
boolean good = true;
for ( int j = 0; j < kmerSize + 1; j++ ) { // +1 is for pairing
good &= i + j < readLen;
}
if ( good ) {
expectedStarts.add(i);
expectedBases.add(bases.substring(i, i + kmerSize + 1));
}
}
assembler.addGGAKmersToGraph(builder, Arrays.asList(new Haplotype(bases.getBytes())));
Assert.assertEquals(builder.addedPairs.size(), expectedStarts.size());
for ( final Kmer addedKmer : builder.addedPairs ) {
Assert.assertTrue(expectedBases.contains(new String(addedKmer.bases())), "Couldn't find kmer " + addedKmer + " among all expected kmers " + expectedBases);
}
}
}

View File

@ -64,7 +64,7 @@ public class HaplotypeCallerComplexAndSymbolicVariantsIntegrationTest extends Wa
@Test
public void testHaplotypeCallerMultiSampleComplex1() {
HCTestComplexVariants(privateTestDir + "AFR.complex.variants.bam", "", "27db36467d40c3cde201f5826e959d78");
HCTestComplexVariants(privateTestDir + "AFR.complex.variants.bam", "", "4a3479fc4ad387d381593b328f737a1b");
}
private void HCTestSymbolicVariants(String bam, String args, String md5) {
@ -88,12 +88,12 @@ public class HaplotypeCallerComplexAndSymbolicVariantsIntegrationTest extends Wa
@Test
public void testHaplotypeCallerMultiSampleGGAComplex() {
HCTestComplexGGA(NA12878_CHR20_BAM, "-L 20:119673-119823 -L 20:121408-121538",
"ed3b577e6f7d68bba6774a62d9df9cd9");
"b7a01525c00d02b3373513a668a43c6a");
}
@Test
public void testHaplotypeCallerMultiSampleGGAMultiAllelic() {
HCTestComplexGGA(NA12878_CHR20_BAM, "-L 20:133041-133161 -L 20:300207-300337",
"a594a28d8053c3e969c39de81a9d03d6");
"a2a42055b068334f415efb07d6bb9acd");
}
}

View File

@ -47,15 +47,12 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import net.sf.picard.reference.IndexedFastaSequenceFile;
import org.broad.tribble.TribbleIndexedFeatureReader;
import org.broadinstitute.sting.WalkerTest;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.variant.GATKVCFUtils;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.vcf.VCFCodec;
import org.testng.annotations.Test;
import java.io.File;
@ -69,6 +66,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
final static String NA12878_CHR20_BAM = validationDataLocation + "NA12878.HiSeq.WGS.bwa.cleaned.recal.hg19.20.bam";
final static String CEUTRIO_BAM = validationDataLocation + "CEUTrio.HiSeq.b37.chr20.10_11mb.bam";
final static String NA12878_RECALIBRATED_BAM = privateTestDir + "NA12878.100kb.BQSRv2.example.bam";
final static String NA12878_PCRFREE = privateTestDir + "PCRFree.2x250.Illumina.20_10_11.bam";
final static String CEUTRIO_MT_TEST_BAM = privateTestDir + "CEUTrio.HiSeq.b37.MT.1_50.bam";
final static String INTERVALS_FILE = validationDataLocation + "NA12878.HiSeq.b37.chr20.10_11mb.test.intervals";
@ -80,12 +78,12 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerMultiSample() {
HCTest(CEUTRIO_BAM, "", "aeab5f0d40852e6332b96481981a0e46");
HCTest(CEUTRIO_BAM, "", "baa5a2eedc8f06ce9f8f98411ee09f8a");
}
@Test
public void testHaplotypeCallerSingleSample() {
HCTest(NA12878_BAM, "", "18d5671d8454e8a0c05ee5f6e9fabfe3");
HCTest(NA12878_BAM, "", "f09e03d41238697b23f95716a12667cb");
}
@Test(enabled = false) // can't annotate the rsID's yet
@ -96,12 +94,12 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerMultiSampleGGA() {
HCTest(CEUTRIO_BAM, "--max_alternate_alleles 3 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles " + validationDataLocation + "combined.phase1.chr20.raw.indels.sites.vcf",
"28c3b1f276ec8198801aafe880e40fb6");
"130d36448faeb7b8d4bce4be12dacd3a");
}
@Test
public void testHaplotypeCallerInsertionOnEdgeOfContig() {
HCTest(CEUTRIO_MT_TEST_BAM, "-dcov 90 -L MT:1-10", "7f1fb8f9587f64643f6612ef1dd6d4ae");
HCTest(CEUTRIO_MT_TEST_BAM, "-L MT:1-10", "7f1fb8f9587f64643f6612ef1dd6d4ae");
}
private void HCTestIndelQualityScores(String bam, String args, String md5) {
@ -112,7 +110,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerSingleSampleIndelQualityScores() {
HCTestIndelQualityScores(NA12878_RECALIBRATED_BAM, "", "bac6f98e910290722df28da44b41f06f");
HCTestIndelQualityScores(NA12878_RECALIBRATED_BAM, "", "7c20aa62633f4ce8ebf12950fbf05ec0");
}
private void HCTestNearbySmallIntervals(String bam, String args, String md5) {
@ -149,7 +147,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerNearbySmallIntervals() {
HCTestNearbySmallIntervals(NA12878_BAM, "", "65e7b1b72a2411d6360138049914aa3a");
HCTestNearbySmallIntervals(NA12878_BAM, "", "0ddc56f0a0fbcfefda79aa20b2ecf603");
}
// This problem bam came from a user on the forum and it spotted a problem where the ReadClipper
@ -166,7 +164,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void HCTestStructuralIndels() {
final String base = String.format("-T HaplotypeCaller --disableDithering -R %s -I %s", REF, privateTestDir + "AFR.structural.indels.bam") + " --no_cmdline_in_header -o %s -minPruning 6 -L 20:8187565-8187800 -L 20:18670537-18670730";
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("cb190c935541ebb9f660f713a882b922"));
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("91717e5e271742c2c9b67223e58f1320"));
executeTest("HCTestStructuralIndels: ", spec);
}
@ -188,7 +186,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
public void HCTestReducedBam() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "bamExample.ReducedRead.ADAnnotation.bam -o %s -L 1:67,225,396-67,288,518", 1,
Arrays.asList("0df626cd0d76aca8a05a545d0b36bf23"));
Arrays.asList("5fe9310addf881bed4fde2354e59ce34"));
executeTest("HC calling on a ReducedRead BAM", spec);
}
@ -196,7 +194,30 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
public void testReducedBamWithReadsNotFullySpanningDeletion() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "reduced.readNotFullySpanningDeletion.bam -o %s -L 1:167871297", 1,
Arrays.asList("8adfa8a27a312760dab50787da595c57"));
Arrays.asList("26a9917f6707536636451266de0116c3"));
executeTest("test calling on a ReducedRead BAM where the reads do not fully span a deletion", spec);
}
// --------------------------------------------------------------------------------------------------------------
//
// test dbSNP annotation
//
// --------------------------------------------------------------------------------------------------------------
@Test
public void HCTestDBSNPAnnotationWGS() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + NA12878_PCRFREE + " -o %s -L 20:10,000,000-10,100,000 -D " + b37dbSNP132, 1,
Arrays.asList("cc6f2a76ee97ecc14a5f956ffbb21d88"));
executeTest("HC calling with dbSNP ID annotation on WGS intervals", spec);
}
@Test
public void HCTestDBSNPAnnotationWEx() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller --disableDithering -R " + b37KGReference + " --no_cmdline_in_header -I " + NA12878_PCRFREE + " -o %s -L 20:10,000,000-11,000,000 -D " + b37dbSNP132
+ " -L " + hg19Intervals + " -isr INTERSECTION", 1,
Arrays.asList("51e91c8af61a6b47807165906baefb00"));
executeTest("HC calling with dbSNP ID annotation on WEx intervals", spec);
}
}

View File

@ -0,0 +1,79 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import org.broadinstitute.sting.WalkerTest;
import org.broadinstitute.sting.utils.haplotypeBAMWriter.HaplotypeBAMWriter;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class HaplotypeCallerParallelIntegrationTest extends WalkerTest {
@DataProvider(name = "NCTDataProvider")
public Object[][] makeNCTDataProvider() {
List<Object[]> tests = new ArrayList<Object[]>();
for ( final int nct : Arrays.asList(1, 2, 4) ) {
tests.add(new Object[]{nct, "9da4cc89590c4c64a36f4a9c820f8609"});
}
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "NCTDataProvider")
public void testHCNCT(final int nct, final String md5) {
WalkerTestSpec spec = new WalkerTestSpec(
"-T HaplotypeCaller -R " + b37KGReference + " --no_cmdline_in_header -I "
+ privateTestDir + "PCRFree.2x250.Illumina.20_10_11.bam -o %s " +
" -L 20:10,000,000-10,100,000 -G none -A -contamination 0.0 -nct " + nct, 1,
Arrays.asList(md5));
executeTest("HC test parallel HC with NCT with nct " + nct, spec);
}
}

View File

@ -1,48 +1,48 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
@ -50,6 +50,9 @@ import org.broadinstitute.sting.BaseTest;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.HashSet;
import java.util.Set;
public class KMerCounterCaseFixUnitTest extends BaseTest {
@Test
public void testMyData() {
@ -76,6 +79,18 @@ public class KMerCounterCaseFixUnitTest extends BaseTest {
testCounting(counter, "NNC", 0);
Assert.assertNotNull(counter.toString());
assertCounts(counter, 5);
assertCounts(counter, 4, "ATG");
assertCounts(counter, 3, "ATG", "ACC");
assertCounts(counter, 2, "ATG", "ACC", "AAA");
assertCounts(counter, 1, "ATG", "ACC", "AAA", "CTG", "NNA", "CCC");
}
/**
 * Asserts that the kmers reported by the counter at or above {@code minCount}
 * are exactly the given expected kmer strings (order-independent).
 */
private void assertCounts(final KMerCounter counter, final int minCount, final String ... expecteds) {
    final Set<Kmer> expectedKmers = new HashSet<Kmer>();
    for ( int i = 0; i < expecteds.length; i++ ) {
        expectedKmers.add(new Kmer(expecteds[i]));
    }
    final Set<Kmer> actualKmers = new HashSet<Kmer>(counter.getKmersWithCountsAtLeast(minCount));
    Assert.assertEquals(actualKmers, expectedKmers);
}
private void testCounting(final KMerCounter counter, final String in, final int expectedCount) {

View File

@ -47,13 +47,12 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.*;
public class KmerUnitTest extends BaseTest {
@DataProvider(name = "KMerCreationData")
@ -130,4 +129,40 @@ public class KmerUnitTest extends BaseTest {
}
}
}
@Test
public void testDifferingPositions() {
// Exercises Kmer.getDifferingPositions(): mutates some bases of a fixed kmer
// and checks that the reported diff count/positions/bases match the mutations.
final String bases = "ACGTCAGACGTACGTTTGACGTCAGACGTACGT";
final Kmer baseKmer = new Kmer(bases.getBytes());
final int NUM_TEST_CASES = 30;
for (int test = 0; test < NUM_TEST_CASES; test++) {
// cycles through 0 .. bases.length()-1 changed bases as test increases
final int numBasesToChange = test % bases.length();
// changes numBasesToChange bases - spread regularly through read string
// NOTE(review): Math.min(..., 1) forces step == 1 whenever numBasesToChange > 0,
// so the mutated bases are contiguous from index 0 rather than spread as the
// comment above says — presumably Math.max was intended; confirm before changing,
// since the loop bound below (idx < numBasesToChange) also assumes step == 1.
final int step = (numBasesToChange > 0?Math.min(bases.length() / numBasesToChange,1) : 1);
final byte[] newBases = bases.getBytes().clone();
int actualChangedBases =0; // could be different from numBasesToChange due to roundoff
for (int idx=0; idx < numBasesToChange; idx+=step) {
// now change given positions
// flip A <-> T at position idx (non-A bases become A, so every write is a real change)
newBases[idx] = (newBases[idx] == (byte)'A'? (byte)'T':(byte)'A');
actualChangedBases++;
}
// compute changed positions
final int[] differingIndices = new int[newBases.length];
final byte[] differingBases = new byte[newBases.length];
final int numDiffs = baseKmer.getDifferingPositions(new Kmer(newBases),newBases.length,differingIndices,differingBases);
Assert.assertEquals(numDiffs,actualChangedBases);
for (int k=0; k < numDiffs; k++) {
final int idx = differingIndices[k];
Assert.assertTrue(newBases[idx] != bases.getBytes()[idx]);
// NOTE(review): differingBases is read at a base position (idx) while it is
// presumably filled per-diff (slot k); this only works here because step == 1
// makes differingIndices[k] == k.  Verify against getDifferingPositions and
// consider differingBases[k] instead.
Assert.assertEquals(differingBases[idx],newBases[idx]);
}
}
}
}

View File

@ -0,0 +1,287 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import net.sf.picard.reference.IndexedFastaSequenceFile;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.SAMFileHeader;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.readthreading.ReadThreadingAssembler;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.UnvalidatingGenomeLoc;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.sting.utils.collections.PrimitivePair;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.sam.ArtificialSAMUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.*;
public class LocalAssemblyEngineUnitTest extends BaseTest {
// Parser for creating GenomeLocs over the reference; initialized in setup().
private GenomeLocParser genomeLocParser;
// The b37 reference sequence, loaded once for all tests in setup().
private IndexedFastaSequenceFile seq;
// Artificial SAM header built from the reference's sequence dictionary.
private SAMFileHeader header;
@BeforeClass
public void setup() throws FileNotFoundException {
// Load the b37 reference once for the whole test class; the location parser
// and the artificial SAM header are both derived from its sequence dictionary.
seq = new CachingIndexedFastaSequenceFile(new File(b37KGReference));
genomeLocParser = new GenomeLocParser(seq);
header = ArtificialSAMUtils.createArtificialSamHeader(seq.getSequenceDictionary());
}
// The two local-assembly implementations exercised by these tests.
private enum Assembler {DEBRUIJN_ASSEMBLER, READ_THREADING_ASSEMBLER}
/**
 * Instantiates a fresh assembly engine of the requested kind.
 *
 * @throws IllegalStateException if the enum gains a value this factory does not know
 */
private LocalAssemblyEngine createAssembler(final Assembler type) {
    if ( type == Assembler.DEBRUIJN_ASSEMBLER ) {
        return new DeBruijnAssembler();
    } else if ( type == Assembler.READ_THREADING_ASSEMBLER ) {
        return new ReadThreadingAssembler();
    } else {
        throw new IllegalStateException("Unexpected " + type);
    }
}
/**
 * Enumerates (assembler, interval, read count) cases: 100 bp windows placed
 * every 200 bp across 20:10,000,000-10,100,000, for each assembler type.
 */
@DataProvider(name = "AssembleIntervalsData")
public Object[][] makeAssembleIntervalsData() {
    final String contig = "20";
    final int start = 10000000;
    final int end = 10100000;
    final int windowSize = 100;
    final int stepSize = 200;
    final int nReadsToUse = 5;

    final List<Object[]> cases = new ArrayList<Object[]>();
    for ( final Assembler assemblerType : Assembler.values() ) {
        int windowStart = start;
        while ( windowStart < end ) {
            final GenomeLoc window = genomeLocParser.createGenomeLoc(contig, windowStart, windowStart + windowSize);
            cases.add(new Object[]{assemblerType, window, nReadsToUse});
            windowStart += stepSize;
        }
    }
    return cases.toArray(new Object[cases.size()][]);
}
/**
 * Enumerates (assembler, interval, read count, variant offset) cases: 100 bp
 * windows every 200 bp across 20:10,000,000-10,001,000, with the variant site
 * swept one base at a time through the 20 bp centered on the window middle.
 */
@DataProvider(name = "AssembleIntervalsWithVariantData")
public Object[][] makeAssembleIntervalsWithVariantData() {
    final String contig = "20";
    final int start = 10000000;
    final int end = 10001000;
    final int windowSize = 100;
    final int stepSize = 200;
    final int variantStepSize = 1;
    final int nReadsToUse = 5;

    final List<Object[]> cases = new ArrayList<Object[]>();
    for ( final Assembler assemblerType : Assembler.values() ) {
        for ( int windowStart = start; windowStart < end; windowStart += stepSize ) {
            final GenomeLoc window = genomeLocParser.createGenomeLoc(contig, windowStart, windowStart + windowSize);
            final int firstVariant = windowSize / 2 - 10;
            final int lastVariant = windowSize / 2 + 10;
            for ( int variantStart = firstVariant; variantStart < lastVariant; variantStart += variantStepSize ) {
                cases.add(new Object[]{assemblerType, window, nReadsToUse, variantStart});
            }
        }
    }
    return cases.toArray(new Object[cases.size()][]);
}
/**
 * Assembling perfect, error-free reference reads over an interval must produce
 * exactly one haplotype: the reference haplotype itself.
 */
@Test(dataProvider = "AssembleIntervalsData")
public void testAssembleRef(final Assembler assembler, final GenomeLoc loc, final int nReadsToUse) {
    final byte[] refBases = seq.getSubsequenceAt(loc.getContig(), loc.getStart(), loc.getStop()).getBases();

    // build nReadsToUse identical full-length reads that match the reference exactly
    final List<GATKSAMRecord> refReads = new LinkedList<GATKSAMRecord>();
    final String cigar = refBases.length + "M";
    for ( int readIdx = 0; readIdx < nReadsToUse; readIdx++ ) {
        final byte[] readBases = refBases.clone();
        final byte[] readQuals = Utils.dupBytes((byte) 30, refBases.length);
        refReads.add(ArtificialSAMUtils.createArtificialRead(header, loc.getContig(), loc.getContigIndex(), loc.getStart(), readBases, readQuals, cigar));
    }

    // TODO -- generalize to all assemblers
    final Haplotype refHaplotype = new Haplotype(refBases, true);
    final List<Haplotype> haplotypes = assemble(assembler, refBases, loc, refReads);
    Assert.assertEquals(haplotypes, Collections.singletonList(refHaplotype));
}
/**
 * Builds a SNP VariantContext at the given site (alt base chosen to always differ
 * from the reference base) and delegates to the shared assembly-with-variant driver.
 */
@Test(dataProvider = "AssembleIntervalsWithVariantData")
public void testAssembleRefAndSNP(final Assembler assembler, final GenomeLoc loc, final int nReadsToUse, final int variantSite) {
    final byte[] refBases = seq.getSubsequenceAt(loc.getContig(), loc.getStart(), loc.getStop()).getBases();
    final byte refByte = refBases[variantSite];
    // pick an alternate base guaranteed to differ from the reference base
    final byte altByte = (byte)(refByte == 'A' ? 'C' : 'A');
    final Allele ref = Allele.create(refByte, true);
    final Allele alt = Allele.create(altByte, false);
    final VariantContext snp = new VariantContextBuilder("x", loc.getContig(), variantSite, variantSite, Arrays.asList(ref, alt)).make();
    testAssemblyWithVariant(assembler, refBases, loc, nReadsToUse, snp);
}
/**
 * For deletion lengths 1-9, builds a deletion VariantContext at the given site
 * (reference allele spans the anchor base plus the deleted bases, alternate allele
 * is just the anchor) and delegates to the shared assembly-with-variant driver.
 */
@Test(dataProvider = "AssembleIntervalsWithVariantData")
public void testAssembleRefAndDeletion(final Assembler assembler, final GenomeLoc loc, final int nReadsToUse, final int variantSite) {
    final byte[] refBases = seq.getSubsequenceAt(loc.getContig(), loc.getStart(), loc.getStop()).getBases();
    final String refString = new String(refBases);
    for ( int deletionLength = 1; deletionLength < 10; deletionLength++ ) {
        final Allele ref = Allele.create(refString.substring(variantSite, variantSite + deletionLength + 1), true);
        final Allele alt = Allele.create(ref.getBases()[0], false);
        final VariantContext deletion = new VariantContextBuilder("x", loc.getContig(), variantSite, variantSite + deletionLength, Arrays.asList(ref, alt)).make();
        testAssemblyWithVariant(assembler, refBases, loc, nReadsToUse, deletion);
    }
}
/**
 * For insertion lengths 1-9, builds an insertion VariantContext at the given site
 * and checks that assembly recovers both the reference haplotype and the haplotype
 * carrying the inserted bases.
 *
 * Bug fix: the isRef flags of the two alleles were swapped (the long allele was
 * marked as the reference), so getAlternateAllele(0) returned the single anchor
 * base and the spliced haplotype was a deletion — silently duplicating
 * testAssembleRefAndDeletion. For an insertion the reference allele is the single
 * anchor base (isRef = true) and the alternate is the anchor plus the inserted
 * bases (isRef = false); the VariantContext stop must then equal the start, since
 * the reference allele has length 1.
 */
@Test(dataProvider = "AssembleIntervalsWithVariantData")
public void testAssembleRefAndInsertion(final Assembler assembler, final GenomeLoc loc, final int nReadsToUse, final int variantSite) {
    final byte[] refBases = seq.getSubsequenceAt(loc.getContig(), loc.getStart(), loc.getStop()).getBases();
    for ( int insertionLength = 1; insertionLength < 10; insertionLength++ ) {
        // reference allele: the single anchor base at the variant site
        final Allele refBase = Allele.create(refBases[variantSite], true);
        // alternate allele: anchor base followed by insertionLength inserted bases
        final Allele altBase = Allele.create(new String(refBases).substring(variantSite, variantSite + insertionLength + 1), false);
        // stop == start because the reference allele is a single base
        final VariantContextBuilder vcb = new VariantContextBuilder("x", loc.getContig(), variantSite, variantSite, Arrays.asList(refBase, altBase));
        testAssemblyWithVariant(assembler, refBases, loc, nReadsToUse, vcb.make());
    }
}
/**
 * Shared driver for the variant-assembly tests: splices the site's first alternate
 * allele into the reference sequence, simulates nReadsToUse reads carrying that
 * alternate sequence, and asserts that assembly yields exactly the reference
 * haplotype followed by the alternate haplotype.
 */
private void testAssemblyWithVariant(final Assembler assembler, final byte[] refBases, final GenomeLoc loc, final int nReadsToUse, final VariantContext site) {
    final String refString = new String(refBases);
    final String prefix = refString.substring(0, site.getStart());
    final String suffix = refString.substring(site.getEnd() + 1, refBases.length);
    final byte[] altBases = (prefix + site.getAlternateAllele(0).getBaseString() + suffix).getBytes();
    // logger.warn("ref " + refString);
    // logger.warn("alt " + new String(altBases));
    final List<GATKSAMRecord> reads = new LinkedList<GATKSAMRecord>();
    final String cigar = altBases.length + "M";
    for ( int readIndex = 0; readIndex < nReadsToUse; readIndex++ ) {
        final byte[] readBases = altBases.clone();
        final byte[] readQuals = Utils.dupBytes((byte) 30, altBases.length);
        reads.add(ArtificialSAMUtils.createArtificialRead(header, loc.getContig(), loc.getContigIndex(), loc.getStart(), readBases, readQuals, cigar));
    }
    final Haplotype refHaplotype = new Haplotype(refBases, true);
    final Haplotype altHaplotype = new Haplotype(altBases, false);
    final List<Haplotype> assembled = assemble(assembler, refBases, loc, reads);
    Assert.assertEquals(assembled, Arrays.asList(refHaplotype, altHaplotype));
}
/**
 * Runs the requested assembler over the given reads inside an active region built
 * on loc, using refBases (with a single full-length M cigar element) as the
 * reference haplotype, and returns the assembled haplotypes.
 */
private List<Haplotype> assemble(final Assembler assembler, final byte[] refBases, final GenomeLoc loc, final List<GATKSAMRecord> reads) {
    final Haplotype refHaplotype = new Haplotype(refBases, true);
    final Cigar refCigar = new Cigar();
    refCigar.add(new CigarElement(refHaplotype.getBases().length, CigarOperator.M));
    refHaplotype.setCigar(refCigar);
    final ActiveRegion region = new ActiveRegion(loc, null, true, genomeLocParser, 0);
    region.addAll(reads);
    final LocalAssemblyEngine engine = createAssembler(assembler);
    // logger.warn("Assembling " + region + " with " + engine);
    return engine.runLocalAssembly(region, refHaplotype, refBases, loc, Collections.<VariantContext>emptyList(), null);
}
/**
 * Test cases for simple single-SNP assembly: one case per SNP position across a
 * 200bp window on chromosome 20, skipping positions too close to the window edges
 * for the given assembler to call. Rows are {name, assembler, loc, ref, alt}.
 */
@DataProvider(name = "SimpleAssemblyTestData")
public Object[][] makeSimpleAssemblyTestData() {
    final List<Object[]> cases = new ArrayList<Object[]>();
    final String contig = "20";
    final int start = 10000000;
    final int windowSize = 200;
    final int end = start + windowSize;
    // per-assembler number of edge bases to exclude
    final Map<Assembler, Integer> edgeExcludesByAssembler = new EnumMap<>(Assembler.class);
    edgeExcludesByAssembler.put(Assembler.DEBRUIJN_ASSEMBLER, 26);
    edgeExcludesByAssembler.put(Assembler.READ_THREADING_ASSEMBLER, 25); // TODO -- decrease to zero when the edge calling problem is fixed
    final String ref = new String(seq.getSubsequenceAt(contig, start, end).getBases());
    final GenomeLoc refLoc = genomeLocParser.createGenomeLoc(contig, start, end);
    for ( final Assembler assembler : Assembler.values() ) {
        final int edgeExclusion = edgeExcludesByAssembler.get(assembler);
        for ( int snpPos = 0; snpPos < windowSize; snpPos++ ) {
            final boolean tooCloseToLeftEdge = snpPos <= edgeExclusion;
            final boolean tooCloseToRightEdge = (windowSize - snpPos) < edgeExclusion;
            if ( tooCloseToLeftEdge || tooCloseToRightEdge )
                continue;
            final byte[] altBases = ref.getBytes();
            altBases[snpPos] = altBases[snpPos] == 'A' ? (byte)'C' : (byte)'A';
            cases.add(new Object[]{"SNP at " + snpPos, assembler, refLoc, ref, new String(altBases)});
        }
    }
    return cases.toArray(new Object[][]{});
}
/**
 * End-to-end single-SNP assembly check: 20 reads carrying the alt sequence should
 * assemble into exactly two haplotypes — the reference first, then the alt.
 */
@Test(dataProvider = "SimpleAssemblyTestData")
public void testSimpleAssembly(final String name, final Assembler assembler, final GenomeLoc loc, final String ref, final String alt) {
    final byte[] refBases = ref.getBytes();
    final byte[] altBases = alt.getBytes();
    final int nReads = 20;
    final String cigar = altBases.length + "M";
    final List<GATKSAMRecord> reads = new LinkedList<>();
    for ( int readIndex = 0; readIndex < nReads; readIndex++ ) {
        final byte[] readBases = altBases.clone();
        final byte[] readQuals = Utils.dupBytes((byte) 30, altBases.length);
        reads.add(ArtificialSAMUtils.createArtificialRead(header, loc.getContig(), loc.getContigIndex(), loc.getStart(), readBases, readQuals, cigar));
    }
    final Haplotype refHaplotype = new Haplotype(refBases, true);
    final Haplotype altHaplotype = new Haplotype(altBases, false);
    final List<Haplotype> haplotypes = assemble(assembler, refBases, loc, reads);
    Assert.assertTrue(haplotypes.size() > 0, "Failed to find ref haplotype");
    Assert.assertEquals(haplotypes.get(0), refHaplotype);
    Assert.assertEquals(haplotypes.size(), 2, "Failed to find single alt haplotype");
    Assert.assertEquals(haplotypes.get(1), altHaplotype);
}
}

View File

@ -0,0 +1,190 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import org.broadinstitute.sting.utils.sam.ArtificialSAMUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Unit tests for ReadErrorCorrector: CorrectionSet consensus bookkeeping, kmer
 * counting from reads, and end-to-end correction of reads with injected errors.
 */
public class ReadErrorCorrectorUnitTest {
    private static final boolean debug = true;
    // short reference chunk used to generate "good" reads
    final String refChunk = "GCATAAACATGGCTCACTGC";
    // longer reference chunk handed to the corrector as its reference context
    final String refChunkHard = "AGCCTTGAACTCCTGGGCTCAAGTGATCCTCCTGCCTCAGTTTCCCATGTAGCTGGGACCACAGGTGGGGGCTCCACCCCTGGCTGATTTTTTTTTTTTTTTTTTTTTGAGATAGGGT";

    /**
     * Records a single (offset, base) correction at a rotating set of offsets and
     * checks that the consensus correction at every position equals the true base.
     */
    @Test
    public void TestBasicCorrectionSet() {
        final byte[] trueBases = refChunk.getBytes();
        final int numCorrections = 50;
        final ReadErrorCorrector.CorrectionSet correctionSet = new ReadErrorCorrector.CorrectionSet(trueBases.length);
        int offset = 2;
        for (int k = 0; k < numCorrections; k++) {
            // introduce one correction at a rotating offset in the array; to make testing
            // easier, the recorded correction is always the true base at that offset
            final byte base = trueBases[offset];
            correctionSet.add(offset, base);
            // skip to some other offset, wrapping around the end of the array
            offset += 7;
            if (offset >= trueBases.length)
                offset -= trueBases.length;
        }
        for (int k = 0; k < trueBases.length; k++) {
            final byte corr = correctionSet.getConsensusCorrection(k);
            Assert.assertEquals(corr, trueBases[k]);
        }
    }

    /**
     * Inserts k corrections at offset k and verifies the set stores exactly k
     * entries per offset. (Removed an unused numCorrections local.)
     */
    @Test
    public void TestExtendedCorrectionSet() {
        final byte[] trueBases = refChunk.getBytes();
        final ReadErrorCorrector.CorrectionSet correctionSet = new ReadErrorCorrector.CorrectionSet(trueBases.length);
        for (int offset = 0; offset < trueBases.length; offset++) {
            // insert k corrections at offset k and make sure we get exactly k bases back
            for (int k = 0; k < offset; k++)
                correctionSet.add(offset, trueBases[offset]);
        }
        for (int offset = 0; offset < trueBases.length; offset++) {
            Assert.assertEquals(correctionSet.get(offset).size(), offset);
        }
    }

    /**
     * Verifies kmer counting on identical reads: when the kmer length equals the read
     * length there is exactly one counted kmer with count == number of reads; when the
     * kmer length is shorter, each read contributes readLength - kmerLength + 1 copies
     * of the single distinct kmer. (Removed an unused offset local.)
     */
    @Test
    public void TestAddReadsToKmers() {
        final int NUM_GOOD_READS = 500;
        final String bases = "AAAAAAAAAAAAAAA";
        final int READ_LENGTH = bases.length();
        final int kmerLengthForReadErrorCorrection = READ_LENGTH;
        final List<GATKSAMRecord> finalizedReadList = new ArrayList<GATKSAMRecord>(NUM_GOOD_READS);
        final byte[] quals = new byte[READ_LENGTH];
        Arrays.fill(quals, (byte) 30);
        for (int k = 0; k < NUM_GOOD_READS; k++) {
            final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(bases.getBytes(), quals, READ_LENGTH + "M");
            finalizedReadList.add(read);
        }
        ReadErrorCorrector readErrorCorrector = new ReadErrorCorrector(kmerLengthForReadErrorCorrection, (byte) 6, 10, debug, refChunkHard.getBytes());
        readErrorCorrector.addReadsToKmers(finalizedReadList);
        // special trivial case: kmer length is equal to read length,
        // so the kmer counter should hold exactly one kmer
        Assert.assertEquals(readErrorCorrector.countsByKMer.getCountedKmers().size(), 1);
        for (final KMerCounter.CountedKmer kmer : readErrorCorrector.countsByKMer.getCountedKmers()) {
            Assert.assertTrue(Arrays.equals(kmer.getKmer().bases(), bases.getBytes()));
            Assert.assertEquals(kmer.getCount(), NUM_GOOD_READS);
        }
        // special case 2: all kmers are identical but kmer length < read length,
        // so each read adds the kmer readLength - kmerLength + 1 times
        final int KMER_LENGTH = 10;
        readErrorCorrector = new ReadErrorCorrector(KMER_LENGTH, (byte) 6, 10, debug, refChunkHard.getBytes());
        readErrorCorrector.addReadsToKmers(finalizedReadList);
        Assert.assertEquals(readErrorCorrector.countsByKMer.getCountedKmers().size(), 1);
        for (final KMerCounter.CountedKmer kmer : readErrorCorrector.countsByKMer.getCountedKmers()) {
            Assert.assertEquals(kmer.getCount(), NUM_GOOD_READS * (READ_LENGTH - KMER_LENGTH + 1));
        }
    }

    /**
     * End-to-end correction test: builds good reads tiled across refChunk, appends
     * copies of the first NUM_BAD_READS good reads with an 'N' error injected, runs
     * correction over everything, and checks that each corrected bad read matches
     * the good read it was derived from.
     *
     * Bug fix: the final loop previously compared finalizedReadList.get(k) against
     * itself, which is trivially true and never exercised the correction. It now
     * compares the corrected bad read (appended at index NUM_GOOD_READS + k) against
     * the original good read at index k; this assumes correctReads updates read
     * bases in place on the supplied reads.
     */
    @Test
    public void TestBasicErrorCorrection() {
        final int NUM_GOOD_READS = 500;
        final int NUM_BAD_READS = 10;
        final int READ_LENGTH = 15;
        final int kmerLengthForReadErrorCorrection = 10;
        final List<GATKSAMRecord> finalizedReadList = new ArrayList<GATKSAMRecord>(NUM_GOOD_READS);
        int offset = 0;
        final byte[] quals = new byte[READ_LENGTH];
        Arrays.fill(quals, (byte) 30);
        for (int k = 0; k < NUM_GOOD_READS; k++) {
            final byte[] bases = Arrays.copyOfRange(refChunk.getBytes(), offset, offset + READ_LENGTH);
            final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(bases, quals, READ_LENGTH + "M");
            finalizedReadList.add(read);
            offset++;
            if (offset >= refChunk.length() - READ_LENGTH)
                offset = 0;
        }
        offset = 2;
        // coverage profile is now perfectly triangular with "good" bases; inject bad reads with errors in them
        for (int k = 0; k < NUM_BAD_READS; k++) {
            final byte[] bases = finalizedReadList.get(k).getReadBases().clone();
            bases[offset] = 'N';
            final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(bases, quals, READ_LENGTH + "M");
            finalizedReadList.add(read);
            offset += 7;
            if (offset >= READ_LENGTH)
                offset = 4; // just some rotating offset for the error position
        }
        // now correct all reads
        final ReadErrorCorrector readErrorCorrector = new ReadErrorCorrector(kmerLengthForReadErrorCorrection, (byte) 6, 10, debug, refChunkHard.getBytes());
        readErrorCorrector.addReadsToKmers(finalizedReadList);
        readErrorCorrector.correctReads(finalizedReadList);
        // check that each corrected bad read has exactly the same content as the good read it came from
        for (int k = 0; k < NUM_BAD_READS; k++) {
            final byte[] correctedBases = finalizedReadList.get(NUM_GOOD_READS + k).getReadBases();
            final byte[] originalBases = finalizedReadList.get(k).getReadBases();
            Assert.assertTrue(Arrays.equals(correctedBases, originalBases));
        }
    }
}

View File

@ -83,7 +83,10 @@ public class BaseEdgeUnitTest extends BaseTest {
e.setMultiplicity(mult + 1);
Assert.assertEquals(e.getMultiplicity(), mult + 1);
final BaseEdge copy = new BaseEdge(e);
e.incMultiplicity(2);
Assert.assertEquals(e.getMultiplicity(), mult + 3);
final BaseEdge copy = e.copy();
Assert.assertEquals(copy.isRef(), e.isRef());
Assert.assertEquals(copy.getMultiplicity(), e.getMultiplicity());
}

View File

@ -49,8 +49,8 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import org.broadinstitute.sting.BaseTest;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import scala.actors.threadpool.Arrays;
import java.io.File;
import java.util.*;
@ -312,4 +312,19 @@ public class BaseGraphUnitTest extends BaseTest {
Assert.assertTrue(BaseGraph.graphEquals(graph, expectedGraph));
}
/**
 * Builds the chain of overlapping 4-mers spanning a test sequence and checks that
 * getBasesForPath reconstructs the sequence minus its first kmerSize - 1 bases.
 */
@Test(enabled = true)
public void testGetBases() {
    final int kmerSize = 4;
    final String testString = "AATGGGGGCAATACTA";
    final int lastKmerStart = testString.length() - kmerSize;
    final List<DeBruijnVertex> path = new ArrayList<>();
    for ( int kmerStart = 0; kmerStart <= lastKmerStart; kmerStart++ ) {
        final String kmer = testString.substring(kmerStart, kmerStart + kmerSize);
        path.add(new DeBruijnVertex(kmer));
    }
    final byte[] pathBases = new DeBruijnGraph().getBasesForPath(path);
    Assert.assertEquals(new String(pathBases), testString.substring(kmerSize - 1));
}
}

View File

@ -137,12 +137,12 @@ public class CommonSuffixMergerUnitTest extends BaseTest {
public static void assertSameHaplotypes(final String name, final SeqGraph actual, final SeqGraph original) {
try {
final Set<String> haplotypes = new HashSet<String>();
final List<Path<SeqVertex>> originalPaths = new KBestPaths<SeqVertex>().getKBestPaths(original);
for ( final Path<SeqVertex> path : originalPaths )
final List<Path<SeqVertex,BaseEdge>> originalPaths = new KBestPaths<SeqVertex,BaseEdge>().getKBestPaths(original);
for ( final Path<SeqVertex,BaseEdge> path : originalPaths )
haplotypes.add(new String(path.getBases()));
final List<Path<SeqVertex>> splitPaths = new KBestPaths<SeqVertex>().getKBestPaths(actual);
for ( final Path<SeqVertex> path : splitPaths ) {
final List<Path<SeqVertex,BaseEdge>> splitPaths = new KBestPaths<SeqVertex,BaseEdge>().getKBestPaths(actual);
for ( final Path<SeqVertex,BaseEdge> path : splitPaths ) {
final String h = new String(path.getBases());
Assert.assertTrue(haplotypes.contains(h), "Failed to find haplotype " + h);
}
@ -166,4 +166,20 @@ public class CommonSuffixMergerUnitTest extends BaseTest {
splitter.merge(data.graph, data.v);
assertSameHaplotypes(String.format("suffixMerge.%s.%d", data.commonSuffix, data.graph.vertexSet().size()), data.graph, original);
}
/**
 * Verifies that SharedSequenceMerger refuses to merge the shared predecessors of b
 * when one of them (v2) is a graph source with no incoming edge of its own.
 *
 * Cleanup: the original passed {@code top} to addVertices twice; the duplicate add
 * was a redundant no-op and has been removed.
 */
@Test
public void testDoesntMergeSourceNodes() {
    final SeqGraph g = new SeqGraph();
    final SeqVertex v1 = new SeqVertex("A");
    final SeqVertex v2 = new SeqVertex("A");
    final SeqVertex v3 = new SeqVertex("A");
    final SeqVertex top = new SeqVertex("T");
    final SeqVertex b = new SeqVertex("C");
    g.addVertices(top, v1, v2, v3, b);
    g.addEdges(top, v1, b);
    g.addEdges(v2, b); // v2 doesn't have previous node, cannot be merged
    g.addEdges(top, v3, b);
    final SharedSequenceMerger merger = new SharedSequenceMerger();
    Assert.assertFalse(merger.merge(g, b), "Shouldn't be able to merge shared vertices, when one is a source");
}
}

Some files were not shown because too many files have changed in this diff Show More