Change the Map-of-maps-of-maps to an array-based implementation, ReadLikelihoods, to hold read likelihoods.

The array structure should be faster to populate and query (not properly benchmarked) and reduce the memory footprint considerably.
    Nevertheless, after removing the PairHMM factor (using the Random likelihoodEngine), it only achieves a speed-up of 15% on an example WGS dataset,
    i.e. there are other, bigger bottlenecks in the system. Bamboo tests also seem to run significantly faster with this change.

    Stories:

      https://www.pivotaltracker.com/story/show/70222086
      https://www.pivotaltracker.com/story/show/67961652

    Changes:

       - ReadLikelihoods added as a substitute for Map<String,PerSampleReadLikelihoods>
       - Operations that involve changes to full sets of ReadLikelihoods have been moved into that class.
       - Simplified a bit the code that handles the downsampling of reads based on contamination

    Caveats:

       - We still keep Map<String,PerReadAlleleLikelihoodMap> around to pass to annotators...; didn't feel like changing the interface of so many public classes in this pull request.
This commit is contained in:
Valentin Ruano-Rubio 2014-08-07 17:03:12 -04:00
parent 09ac3779d6
commit 2914ecb585
19 changed files with 503 additions and 666 deletions

View File

@ -99,6 +99,11 @@ public class StandardCallerArgumentCollection implements Cloneable {
@Argument(fullName = "contamination_fraction_per_sample_file", shortName = "contaminationFile", doc = "Tab-separated File containing fraction of contamination in sequencing data (per sample) to aggressively remove. Format should be \"<SampleID><TAB><Contamination>\" (Contamination is double) per line; No header.", required = false)
public File CONTAMINATION_FRACTION_FILE = null;
/**
* Indicates whether there is some sample contamination present.
*/
private boolean sampleContaminationWasLoaded = false;
/**
*
* @return an _Immutable_ copy of the Sample-Contamination Map, defaulting to CONTAMINATION_FRACTION so that if the sample isn't in the map map(sample)==CONTAMINATION_FRACTION
@ -106,15 +111,32 @@ public class StandardCallerArgumentCollection implements Cloneable {
public Map<String,Double> getSampleContamination(){
//make sure that the default value is set up right
sampleContamination.setDefaultValue(CONTAMINATION_FRACTION);
// NOTE(review): side effect in a getter — marks contamination as "loaded" whenever the
// global CONTAMINATION_FRACTION is a positive non-NaN value. isSampleContaminationPresent()
// re-checks the exact same condition, so this assignment looks redundant — confirm before removing.
if (!Double.isNaN(CONTAMINATION_FRACTION) && CONTAMINATION_FRACTION > 0.0)
sampleContaminationWasLoaded = true;
// Immutable view: callers must not modify the map; lookups of unknown samples fall back
// to the default value set above.
return Collections.unmodifiableMap(sampleContamination);
}
// Replaces the current per-sample contamination fractions with the given map and
// recomputes whether any contamination is effectively present.
public void setSampleContamination(DefaultHashMap<String, Double> sampleContamination) {
// Drop the previous contents before loading the new values.
this.sampleContamination.clear();
// Contamination counts as "loaded" if the global fraction is a positive, non-NaN value...
this.sampleContaminationWasLoaded = !Double.isNaN(CONTAMINATION_FRACTION) && CONTAMINATION_FRACTION > 0.0;
// ...or, failing that, if any individual sample carries a positive, non-NaN fraction.
if (!sampleContaminationWasLoaded)
for (final Double d : sampleContamination.values())
if (!Double.isNaN(d) && d > 0.0) {
sampleContaminationWasLoaded = true;
break;
}
this.sampleContamination.putAll(sampleContamination);
// Re-establish the default so lookups of unknown samples fall back to CONTAMINATION_FRACTION.
this.sampleContamination.setDefaultValue(CONTAMINATION_FRACTION);
}
/**
 * Checks whether any sample contamination has been configured.
 *
 * @return {@code true} iff some sample contamination is present, either through the
 *         global contamination fraction or a previously loaded per-sample value
 */
public boolean isSampleContaminationPresent() {
    if (sampleContaminationWasLoaded)
        return true;
    return !Double.isNaN(CONTAMINATION_FRACTION) && CONTAMINATION_FRACTION > 0.0;
}
//Needs to be here because it uses CONTAMINATION_FRACTION
private DefaultHashMap<String,Double> sampleContamination = new DefaultHashMap<String,Double>(CONTAMINATION_FRACTION);

View File

@ -1,58 +1,58 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.gatk.tools.walkers.haplotypecaller;
import org.apache.log4j.Logger;
import org.broadinstitute.gatk.tools.walkers.haplotypecaller.graphs.SeqGraph;
import org.broadinstitute.gatk.utils.activeregion.ActiveRegion;
import org.broadinstitute.gatk.utils.exceptions.GATKException;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.pairhmm.FlexibleHMM;
import org.broadinstitute.gatk.utils.pairhmm.FastLoglessPairHMM;
import org.broadinstitute.gatk.utils.pairhmm.FlexibleHMM;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import java.io.File;
@ -112,17 +112,16 @@ public class GraphBasedLikelihoodCalculationEngine implements ReadLikelihoodCalc
debugMode = debugHaplotypeGraphAndLikelihoods ? DebugMode.EXTRA_DEBUG : debug ? DebugMode.DEBUG : DebugMode.NONE;
}
@Override
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods(final AssemblyResultSet assemblyResultSet, final Map<String, List<GATKSAMRecord>> perSampleReadList) {
public ReadLikelihoods<Haplotype> computeReadLikelihoods(final AssemblyResultSet assemblyResultSet, final List<String> samples, final Map<String, List<GATKSAMRecord>> perSampleReadList) {
final GraphBasedLikelihoodCalculationEngineInstance graphLikelihoodEngine =
new GraphBasedLikelihoodCalculationEngineInstance(assemblyResultSet,
hmm,log10GlobalReadMismappingRate,heterogeneousKmerSizeResolution);
final List<Haplotype> haplotypes = assemblyResultSet.getHaplotypeList();
final List<Haplotype> supportedHaplotypes = graphLikelihoodEngine.getHaplotypeList();
if (supportedHaplotypes.size() != haplotypes.size()) logger.warn("Some haplotypes were drop due to missing route on the graph (supported / all): " + supportedHaplotypes.size() + "/" + haplotypes.size());
final Map<String,PerReadAlleleLikelihoodMap> result = graphLikelihoodEngine.computeReadLikelihoods(supportedHaplotypes,
perSampleReadList );
if (supportedHaplotypes.size() != haplotypes.size())
logger.warn("Some haplotypes were drop due to missing route on the graph (supported / all): " + supportedHaplotypes.size() + "/" + haplotypes.size());
final ReadLikelihoods<Haplotype> result = graphLikelihoodEngine.computeReadLikelihoods(supportedHaplotypes,samples,perSampleReadList);
if (debugMode != DebugMode.NONE) graphLikelihoodDebugDumps(assemblyResultSet.getRegionForGenotyping(), graphLikelihoodEngine,result);
return result;
}
@ -131,7 +130,7 @@ public class GraphBasedLikelihoodCalculationEngine implements ReadLikelihoodCalc
* A few debug messages associated with the GraphBased likelihoods engine.
*/
private void graphLikelihoodDebugDumps(final ActiveRegion originalActiveRegion, final GraphBasedLikelihoodCalculationEngineInstance graphLikelihoodEngine,
final Map<String, PerReadAlleleLikelihoodMap> result) {
final ReadLikelihoods<Haplotype> result) {
if (graphLikelihoodEngine.hasCycles())
logger.debug("Resulting haplotype graph combining several kmer sizes has cycles");
else if (graphLikelihoodEngine.haplotypeGraph.hasNonReferenceEnds())
@ -144,14 +143,14 @@ public class GraphBasedLikelihoodCalculationEngine implements ReadLikelihoodCalc
sq.simplifyGraph();
sq.printGraph(new File(originalActiveRegion.getLocation() + "-" + graphLikelihoodEngine.getKmerSize() + "-haplotypeSeqGraph.dot"), 10000);
try {
FileWriter fw = new FileWriter(new File(originalActiveRegion.getLocation() + "-likelihoods.txt"));
PrintWriter pw = new PrintWriter(fw);
final FileWriter fw = new FileWriter(new File(originalActiveRegion.getLocation() + "-likelihoods.txt"));
final PrintWriter pw = new PrintWriter(fw);
//Note: we only output the first sample likelihoods, perhaps should output all of them but for debugging this is normally what is needed.
pw.println(result.entrySet().iterator().next().getValue().toString());
pw.println(result.sampleMatrix(0)); // need to actually implement a proper toString for the SampleMatrix.
pw.close();
fw.close();
} catch (Exception ex) {
throw new GATKException("", ex);
} catch (final Exception ex) {
throw new IllegalStateException("", ex);
}
}
}

View File

@ -57,6 +57,7 @@ import org.broadinstitute.gatk.utils.Utils;
import org.broadinstitute.gatk.utils.collections.CountSet;
import org.broadinstitute.gatk.utils.collections.Pair;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.pairhmm.FlexibleHMM;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
@ -215,6 +216,7 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
*
* @return {@code true} iff so.
*/
@SuppressWarnings("unused")
public boolean hasVariation() {
return hasVariation;
}
@ -231,27 +233,24 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
* @return never {@code null}, and with at least one entry for input sample (keys in {@code perSampleReadList}.
* The value maps can be potentially empty though.
*/
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods(
final List<Haplotype> haplotypes,
public ReadLikelihoods<Haplotype> computeReadLikelihoods(final List<Haplotype> haplotypes, final List<String> samples,
final Map<String, List<GATKSAMRecord>> perSampleReadList) {
// General preparation on the input haplotypes:
Collections.sort(haplotypes, Haplotype.ALPHANUMERICAL_COMPARATOR);
final Map<Haplotype, Allele> alleleVersions = new LinkedHashMap<>(haplotypes.size());
for (final Haplotype haplotype : haplotypes)
alleleVersions.put(haplotype, Allele.create(haplotype,haplotype.isReference()));
final ReadLikelihoods<Haplotype> result = new ReadLikelihoods<>(samples, haplotypes, perSampleReadList);
final List<Haplotype> sortedHaplotypes = new ArrayList<>(haplotypes);
Collections.sort(sortedHaplotypes, Haplotype.ALPHANUMERICAL_COMPARATOR);
// The actual work:
final HashMap<String, PerReadAlleleLikelihoodMap> result = new HashMap<>(perSampleReadList.size());
for (final Map.Entry<String, List<GATKSAMRecord>> e : perSampleReadList.entrySet()) {
final String sample = e.getKey();
final List<GATKSAMRecord> reads = e.getValue();
final Set<GATKSAMRecord> mayNeedAdjustment = new HashSet<>(reads.size());
final int sampleCount = result.sampleCount();
for (int s = 0; s < sampleCount; s++) {
final List<GATKSAMRecord> sampleReads = result.sampleReads(s);
// Get the cost/likelihood of each read at relevant subpaths on the tree:
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> costsByEndingVertex = calculatePathCostsByRead(reads, mayNeedAdjustment);
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> costsByEndingVertex = calculatePathCostsByRead(sampleReads);
// Create the resulting per-read maps:
final PerReadAlleleLikelihoodMap prallm = calculatePerReadAlleleLikelihoodMap(haplotypes, costsByEndingVertex, alleleVersions);
result.put(sample, prallm);
calculatePerReadAlleleLikelihoodMap(costsByEndingVertex,result.sampleMatrix(s) );
}
result.normalizeLikelihoods(true,log10globalReadMismappingRate);
logger.debug("Likelihood analysis summary: reads anchored " + anchoredReads + "/" + (anchoredReads + nonAnchoredReads) + "");
return result;
}
@ -263,8 +262,7 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
* @param fileName name of the output file.
*/
public void printGraph(final String fileName) {
if (haplotypeGraph != null)
haplotypeGraph.printGraph(fileName);
haplotypeGraph.printGraph(fileName);
}
/**
@ -281,36 +279,24 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
*
* @return {@code true} iff so.
*/
@SuppressWarnings("unused")
public boolean hasCycles() {
// It is set to null if it contained cycles.
return haplotypeGraph == null;
return haplotypeGraph.hasCycles();
}
/**
* Builds the result per-read allele likelihood map.
*
* @param haplotypes haplotypes to process.
* @param costsEndingByVertex Read vs haplotype graph subpaths cost indexed by ending vertex.
* @param alleleVersions map between haplotypes and the corresponding allele.
* @return never {@code null} although perhaps empty.
* @param costsEndingByVertex Read vs haplotype graph sub-paths cost indexed by ending vertex.
* @param likelihoods matrix where to set the likelihoods where the first index in the haplotype's and the second
* the read.
*/
protected PerReadAlleleLikelihoodMap calculatePerReadAlleleLikelihoodMap(
final Collection<Haplotype> haplotypes,
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> costsEndingByVertex, final Map<Haplotype, Allele> alleleVersions) {
final PerReadAlleleLikelihoodMap result = new PerReadAlleleLikelihoodMap();
if (haplotypeGraph == null)
return result;
final Map<GATKSAMRecord, Double> maxAlleleLogLk = new HashMap<>(anchoredReads + nonAnchoredReads + 10);
final Set<Haplotype> supportedHaplotypes = new LinkedHashSet<>(haplotypeGraph.getHaplotypes());
supportedHaplotypes.retainAll(haplotypes);
for (final Haplotype haplotype : supportedHaplotypes)
calculatePerReadAlleleLikelihoodMapHaplotypeProcessing(haplotype, alleleVersions, result, maxAlleleLogLk, costsEndingByVertex);
makeLikelihoodAdjustment(alleleVersions, result, maxAlleleLogLk.keySet(), maxAlleleLogLk);
applyGlobalReadMismappingRate(alleleVersions, result, maxAlleleLogLk);
return result;
protected void calculatePerReadAlleleLikelihoodMap(final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> costsEndingByVertex,
final ReadLikelihoods.Matrix<Haplotype> likelihoods) {
final int alleleCount = likelihoods.alleleCount();
for (int h = 0; h < alleleCount; h++)
calculatePerReadAlleleLikelihoodMapHaplotypeProcessing(h, likelihoods, costsEndingByVertex);
}
/**
@ -322,25 +308,24 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
* "likelihood".
* </p>
*
* @param haplotype the target haplotype
* @param alleleVersions allele version of the haplotypes. These are the ones to be used in the final output.
* @param result target where to add the read-vs-haplotype likelihoods.
* @param maxAlleleLogLk where to place the maximum likelihood achieve on any haplotype for each read.
* @param haplotypeIndex the target haplotype index in the {@code likelihoods} matrix.
* @param likelihoods matrix of likelihoods.
* @param costsEndingByVertex read costs assorted by their end vertex.
*/
private void calculatePerReadAlleleLikelihoodMapHaplotypeProcessing(final Haplotype haplotype,
final Map<Haplotype, Allele> alleleVersions,
final PerReadAlleleLikelihoodMap result,
final Map<GATKSAMRecord, Double> maxAlleleLogLk,
private void calculatePerReadAlleleLikelihoodMapHaplotypeProcessing(final int haplotypeIndex,
final ReadLikelihoods.Matrix<Haplotype> likelihoods,
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> costsEndingByVertex) {
final Haplotype haplotype = likelihoods.allele(haplotypeIndex);
final HaplotypeRoute haplotypeRoute = haplotypeGraph.getHaplotypeRoute(haplotype);
final Set<MultiDeBruijnVertex> haplotypeVertices = haplotypeRoute.vertexSet();
final Map<GATKSAMRecord, ReadCost> readCostByRead = new HashMap<>();
final Set<MultiDeBruijnVertex> visitedVertices = new HashSet<>(haplotypeVertices.size());
final List<MultiSampleEdge> edgeList = haplotypeRoute.getEdges();
MultiDeBruijnVertex currentVertex = haplotypeRoute.getFirstVertex();
Route<MultiDeBruijnVertex, MultiSampleEdge> pathSoFar = new Route<>(currentVertex, haplotypeGraph);
final Iterator<MultiSampleEdge> edgeIterator = edgeList.iterator();
while (true) {
visitedVertices.add(currentVertex);
final Set<ReadSegmentCost> finishingAtElementCostSet = costsEndingByVertex.get(currentVertex);
@ -351,15 +336,12 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
currentVertex = pathSoFar.getLastVertex();
}
final List<ReadCost> readCosts = new ArrayList<>(readCostByRead.values());
Collections.sort(readCosts, ReadCost.COMPARATOR);
for (final ReadCost rc : readCosts)
result.add(rc.read, alleleVersions.get(haplotype), rc.getCost());
for (final ReadCost rc : readCosts) {
final Double currentMax = maxAlleleLogLk.get(rc.read);
if (currentMax == null || currentMax < rc.getCost())
maxAlleleLogLk.put(rc.read, rc.getCost());
int readIndex = 0;
for (final GATKSAMRecord read : likelihoods.reads()) {
final ReadCost rc = readCostByRead.get(read);
//if (rc != null)
likelihoods.set(haplotypeIndex,readIndex,rc == null ? Double.NEGATIVE_INFINITY : rc.getCost());
readIndex++;
}
}
@ -443,33 +425,6 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
}
}
/**
* Makes sure that the reference allele likelihood is not too much smaller that the best alternative allele.
* The justification of this constraint is explained in
* {@link PairHMMLikelihoodCalculationEngine#computeDiploidHaplotypeLikelihoods}.
*
* @param alleleVersions correspondence between input haplotypes and output alleles.
* @param result the target result map.
* @param maxAlleleLogLk for each read indicates the likelihood of the best alternative allele.
*/
private void applyGlobalReadMismappingRate(final Map<Haplotype, Allele> alleleVersions,
final PerReadAlleleLikelihoodMap result,
final Map<GATKSAMRecord, Double> maxAlleleLogLk) {
if (!Double.isNaN(log10globalReadMismappingRate) && !Double.isInfinite(log10globalReadMismappingRate)) {
final Allele referenceAllele = alleleVersions.get(haplotypeGraph.getReferenceHaplotype());
for (final Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry : result.getLikelihoodReadMap().entrySet()) {
final GATKSAMRecord read = entry.getKey();
final Map<Allele, Double> likelihoods = entry.getValue();
final Double maxLogLk = maxAlleleLogLk.get(read);
if (maxAlleleLogLk == null) continue;
final Double referenceLogLk = likelihoods.get(referenceAllele);
final Double minReferenceLogLk = maxLogLk + log10globalReadMismappingRate;
if (referenceLogLk == null || referenceLogLk < minReferenceLogLk)
likelihoods.put(referenceAllele, minReferenceLogLk);
}
}
}
/**
* Calculates path costs for a set of reads.
* <p/>
@ -479,17 +434,16 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
* likelihood (cost) of traversing a possible path across the event block using that read.
* </p>
*
* @param reads reads to analyze.
* @param mayNeedAdjustment set where to add reads whose likelihood might need adjustment.
* @param reads reads to analyze.
* @return never {@code null}.
*/
protected Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> calculatePathCostsByRead(
final List<GATKSAMRecord> reads, final Set<GATKSAMRecord> mayNeedAdjustment) {
final List<GATKSAMRecord> reads) {
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> result = new HashMap<>(reads.size());
if (!hasVariation)
return Collections.emptyMap();
for (final GATKSAMRecord r : reads) {
calculatePathCostsByRead(r, mayNeedAdjustment, result);
calculatePathCostsByRead(r, result);
}
return result;
}
@ -498,10 +452,9 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
* Calculates path cost for a single read.
*
* @param read target read.
* @param mayNeedAdjustment set where to add read whose likelihood might need adjustment.
* @param result map where to add the result.
*/
private void calculatePathCostsByRead(final GATKSAMRecord read, final Set<GATKSAMRecord> mayNeedAdjustment,
private void calculatePathCostsByRead(final GATKSAMRecord read,
final Map<MultiDeBruijnVertex, Set<ReadSegmentCost>> result) {
final ReadAnchoring anchoring = new ReadAnchoring(read,haplotypeGraph);
@ -510,14 +463,11 @@ public class GraphBasedLikelihoodCalculationEngineInstance {
if (!anchoring.isAnchoredSomewhere()) {
defaultToRegularPairHMM(anchoring, result);
nonAnchoredReads++;
return;
} else {
calculateReadSegmentCosts(anchoring, hmm, result);
if (!anchoring.isPerfectAnchoring()) danglingEndPathCosts(anchoring, hmm, result);
anchoredReads++;
}
calculateReadSegmentCosts(anchoring, hmm, result);
if (!anchoring.isPerfectAnchoring()) danglingEndPathCosts(anchoring, hmm, result);
mayNeedAdjustment.add(read);
anchoredReads++;
}
/**

View File

@ -48,7 +48,9 @@ package org.broadinstitute.gatk.tools.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import htsjdk.samtools.SAMFileWriter;
import org.broadinstitute.gatk.utils.commandline.*;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.variantcontext.writer.VariantContextWriter;
import htsjdk.variant.vcf.*;
import org.broadinstitute.gatk.engine.CommandLineGATK;
import org.broadinstitute.gatk.engine.arguments.DbsnpArgumentCollection;
import org.broadinstitute.gatk.engine.contexts.AlignmentContext;
@ -75,11 +77,12 @@ import org.broadinstitute.gatk.utils.activeregion.ActiveRegion;
import org.broadinstitute.gatk.utils.activeregion.ActiveRegionReadState;
import org.broadinstitute.gatk.utils.activeregion.ActivityProfileState;
import org.broadinstitute.gatk.utils.clipping.ReadClipper;
import org.broadinstitute.gatk.utils.commandline.*;
import org.broadinstitute.gatk.utils.exceptions.UserException;
import org.broadinstitute.gatk.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.gatk.utils.fragments.FragmentCollection;
import org.broadinstitute.gatk.utils.fragments.FragmentUtils;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.gga.GenotypingGivenAllelesUtils;
import org.broadinstitute.gatk.utils.gvcf.GVCFWriter;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
@ -89,12 +92,10 @@ import org.broadinstitute.gatk.utils.haplotypeBAMWriter.HaplotypeBAMWriter;
import org.broadinstitute.gatk.utils.help.DocumentedGATKFeature;
import org.broadinstitute.gatk.utils.help.HelpConstants;
import org.broadinstitute.gatk.utils.pairhmm.PairHMM;
import org.broadinstitute.gatk.utils.sam.AlignmentUtils;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import org.broadinstitute.gatk.utils.sam.ReadUtils;
import org.broadinstitute.gatk.utils.variant.GATKVCFIndexType;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.variantcontext.writer.VariantContextWriter;
import htsjdk.variant.vcf.*;
import org.broadinstitute.gatk.utils.variant.HomoSapiensConstants;
import java.io.FileNotFoundException;
@ -932,12 +933,12 @@ public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, In
final Map<String,List<GATKSAMRecord>> reads = splitReadsBySample( regionForGenotyping.getReads() );
// Calculate the likelihoods: CPU intesive part.
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = likelihoodCalculationEngine.computeReadLikelihoods(assemblyResult,reads);
final ReadLikelihoods<Haplotype> readLikelihoods =
likelihoodCalculationEngine.computeReadLikelihoods(assemblyResult,samplesList,reads);
// Realign all the reads to the most likely haplotype for use by the annotations
for( final Map.Entry<String, PerReadAlleleLikelihoodMap> entry : stratifiedReadMap.entrySet() ) {
entry.getValue().realignReadsToMostLikelyHaplotype(haplotypes, assemblyResult.getPaddedReferenceLoc());
}
// Realign reads to their best haplotype.
final Map<GATKSAMRecord,GATKSAMRecord> readRealignments = realignReadsToTheirBestHaplotype(readLikelihoods, assemblyResult.getPaddedReferenceLoc());
readLikelihoods.changeReads(readRealignments);
// Note: we used to subset down at this point to only the "best" haplotypes in all samples for genotyping, but there
// was a bad interaction between that selection and the marginalization that happens over each event when computing
@ -947,7 +948,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, In
final HaplotypeCallerGenotypingEngine.CalledHaplotypes calledHaplotypes = genotypingEngine.assignGenotypeLikelihoods(
haplotypes,
stratifiedReadMap,
readLikelihoods,
perSampleFilteredReadList,
assemblyResult.getFullReferenceWithPadding(),
assemblyResult.getPaddedReferenceLoc(),
@ -964,7 +965,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, In
assemblyResult.getPaddedReferenceLoc(),
haplotypes,
calledHaplotypes.getCalledHaplotypes(),
stratifiedReadMap);
readLikelihoods);
}
if( SCAC.DEBUG ) { logger.info("----------------------------------------------------------------------------------"); }
@ -982,15 +983,36 @@ public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, In
// output variant containing region.
result.addAll(referenceConfidenceModel.calculateRefConfidence(assemblyResult.getReferenceHaplotype(),
calledHaplotypes.getCalledHaplotypes(), assemblyResult.getPaddedReferenceLoc(), regionForGenotyping,
stratifiedReadMap, calledHaplotypes.getCalls()));
readLikelihoods, calledHaplotypes.getCalls()));
// output right-flanking non-variant section:
if (trimmingResult.hasRightFlankingRegion())
result.addAll(referenceModelForNoVariation(trimmingResult.nonVariantRightFlankRegion(),false));
return result;
}
} else {
} else
return calledHaplotypes.getCalls();
}
/**
 * Realigns every read against the haplotype it supports best and returns the result as a map
 * from the original read to its realigned version.
 * <p>
 * Missing keys or entries whose key and value are equivalent mean the read was not realigned.
 * </p>
 * @param originalReadLikelihoods the read-vs-haplotype likelihoods used to pick each read's best haplotype.
 * @param paddedReferenceLoc the padded reference span the realignment is anchored to.
 * @return never {@code null}
 */
private Map<GATKSAMRecord,GATKSAMRecord> realignReadsToTheirBestHaplotype(final ReadLikelihoods<Haplotype> originalReadLikelihoods, final GenomeLoc paddedReferenceLoc) {
    final Collection<ReadLikelihoods<Haplotype>.BestAllele> readBestHaplotypes = originalReadLikelihoods.bestAlleles();
    final Map<GATKSAMRecord,GATKSAMRecord> realignedByOriginal = new HashMap<>(readBestHaplotypes.size());
    for (final ReadLikelihoods<Haplotype>.BestAllele best : readBestHaplotypes) {
        final GATKSAMRecord read = best.read;
        // Re-create the read's alignment against the reference using its best supporting haplotype;
        // informativeness controls how aggressively the aligner trusts the haplotype.
        realignedByOriginal.put(read,
                AlignmentUtils.createReadAlignedToRef(read, best.allele, paddedReferenceLoc.getStart(), best.isInformative()));
    }
    return realignedByOriginal;
}
private boolean containsCalls(final HaplotypeCallerGenotypingEngine.CalledHaplotypes calledHaplotypes) {
@ -1086,23 +1108,15 @@ public class HaplotypeCaller extends ActiveRegionWalker<List<VariantContext>, In
* @param region the active region containing reads
* @return a map from sample -> PerReadAlleleLikelihoodMap that maps each read to ref
*/
public static Map<String, PerReadAlleleLikelihoodMap> createDummyStratifiedReadMap(final Haplotype refHaplotype,
final List<String> samples,
final ActiveRegion region) {
final Allele refAllele = Allele.create(refHaplotype, true);
public static ReadLikelihoods<Haplotype> createDummyStratifiedReadMap(final Haplotype refHaplotype,
final List<String> samples,
final ActiveRegion region) {
return new ReadLikelihoods<>(samples, Collections.singletonList(refHaplotype),
splitReadsBySample(samples, region.getReads()));
final Map<String, PerReadAlleleLikelihoodMap> map = new LinkedHashMap<>(1);
for ( final Map.Entry<String, List<GATKSAMRecord>> entry : splitReadsBySample(samples, region.getReads()).entrySet() ) {
final PerReadAlleleLikelihoodMap likelihoodMap = new PerReadAlleleLikelihoodMap();
for ( final GATKSAMRecord read : entry.getValue() ) {
likelihoodMap.add(read, refAllele, 0.0);
}
map.put(entry.getKey(), likelihoodMap);
}
return map;
}
//---------------------------------------------------------------------------------------------------------------
//
// reduce

View File

@ -57,8 +57,7 @@ import org.broadinstitute.gatk.tools.walkers.genotyper.OutputMode;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.GenomeLocParser;
import org.broadinstitute.gatk.utils.Utils;
import org.broadinstitute.gatk.utils.collections.DefaultHashMap;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.EventMap;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.haplotype.MergeVariantsAcrossHaplotypes;
@ -74,7 +73,7 @@ import java.util.*;
public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeCallerArgumentCollection> {
private final static List<Allele> NO_CALL = Collections.singletonList(Allele.NO_CALL);
private final static int ALLELE_EXTENSION = 2;
private static final int ALLELE_EXTENSION = 2;
private MergeVariantsAcrossHaplotypes crossHaplotypeEventMerger;
@ -161,17 +160,17 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
* Main entry point of class - given a particular set of haplotypes, samples and reference context, compute
* genotype likelihoods and assemble into a list of variant contexts and genomic events ready for calling
*
* The list of samples we're working with is obtained from the haplotypeReadMap
* The list of samples we're working with is obtained from the readLikelihoods
*
* @param haplotypes Haplotypes to assign likelihoods to
* @param haplotypeReadMap Map from reads->(haplotypes,likelihoods)
* @param readLikelihoods Map from reads->(haplotypes,likelihoods)
* @param perSampleFilteredReadList Map from sample to reads that were filtered after assembly and before calculating per-read likelihoods.
* @param ref Reference bytes at active region
* @param refLoc Corresponding active region genome location
* @param activeRegionWindow Active window
* @param genomeLocParser GenomeLocParser
* @param activeAllelesToGenotype Alleles to genotype
* @param emitReferenceConfidence whether we should add a <NON_REF></NON_REF> alternative allele to the result variation contexts.
* @param emitReferenceConfidence whether we should add a &lt;NON_REF&gt; alternative allele to the result variation contexts.
*
* @return A CalledHaplotypes object containing a list of VC's with genotyped events and called haplotypes
*
@ -180,7 +179,7 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
@Ensures("result != null")
// TODO - can this be refactored? this is hard to follow!
public CalledHaplotypes assignGenotypeLikelihoods( final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final ReadLikelihoods<Haplotype> readLikelihoods,
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
final byte[] ref,
final GenomeLoc refLoc,
@ -191,7 +190,7 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
final boolean emitReferenceConfidence) {
// sanity check input arguments
if (haplotypes == null || haplotypes.isEmpty()) throw new IllegalArgumentException("haplotypes input should be non-empty and non-null, got "+haplotypes);
if (haplotypeReadMap == null || haplotypeReadMap.isEmpty()) throw new IllegalArgumentException("haplotypeReadMap input should be non-empty and non-null, got "+haplotypeReadMap);
if (readLikelihoods == null || readLikelihoods.sampleCount() == 0) throw new IllegalArgumentException("readLikelihoods input should be non-empty and non-null, got "+readLikelihoods);
if (ref == null || ref.length == 0 ) throw new IllegalArgumentException("ref bytes input should be non-empty and non-null, got " + Arrays.toString(ref));
if (refLoc == null || refLoc.size() != ref.length) throw new IllegalArgumentException(" refLoc must be non-null and length must match ref bytes, got "+refLoc);
if (activeRegionWindow == null ) throw new IllegalArgumentException("activeRegionWindow must be non-null, got "+activeRegionWindow);
@ -200,12 +199,11 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
// update the haplotypes so we're ready to call, getting the ordered list of positions on the reference
// that carry events among the haplotypes
final TreeSet<Integer> startPosKeySet = decomposeHaplotypesIntoVariantContexts(haplotypes, haplotypeReadMap, ref, refLoc, activeAllelesToGenotype);
final TreeSet<Integer> startPosKeySet = decomposeHaplotypesIntoVariantContexts(haplotypes, readLikelihoods, ref, refLoc, activeAllelesToGenotype);
// Walk along each position in the key set and create each event to be outputted
final Set<Haplotype> calledHaplotypes = new HashSet<>();
final List<VariantContext> returnCalls = new ArrayList<>();
final Map<String, Double> emptyDownSamplingMap = new DefaultHashMap<>(0.0);
for( final int loc : startPosKeySet ) {
if( loc >= activeRegionWindow.getStart() && loc <= activeRegionWindow.getStop() ) { // genotyping an event inside this active region
@ -221,7 +219,9 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
// Merge the event to find a common reference representation
VariantContext mergedVC = GATKVariantContextUtils.simpleMerge(eventsAtThisLoc, priorityList, GATKVariantContextUtils.FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED, GATKVariantContextUtils.GenotypeMergeType.PRIORITIZE, false, false, null, false, false);
VariantContext mergedVC = GATKVariantContextUtils.simpleMerge(eventsAtThisLoc, priorityList,
GATKVariantContextUtils.FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED,
GATKVariantContextUtils.GenotypeMergeType.PRIORITIZE, false, false, null, false, false);
final VariantContextBuilder vcb = new VariantContextBuilder(mergedVC);
@ -250,22 +250,25 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
if (logger != null) logger.info("Genotyping event at " + loc + " with alleles = " + mergedVC.getAlleles());
}
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap = convertHaplotypeReadMapToAlleleReadMap(haplotypeReadMap, alleleMapper, configuration.getSampleContamination(), genomeLocParser, mergedVC);
ReadLikelihoods<Allele> readAlleleLikelihoods = readLikelihoods.marginalize(alleleMapper,genomeLocParser.createPaddedGenomeLoc(genomeLocParser.createGenomeLoc(mergedVC),ALLELE_EXTENSION));
if (configuration.isSampleContaminationPresent())
readAlleleLikelihoods.contaminationDownsampling(configuration.getSampleContamination());
if (emitReferenceConfidence) addMiscellaneousAllele(alleleReadMap);
final GenotypesContext genotypes = calculateGLsForThisEvent( alleleReadMap, mergedVC );
final VariantContext call = calculateGenotypes(null, null, null, null, new VariantContextBuilder(mergedVC).genotypes(genotypes).make(), calculationModel, false, null);
if (emitReferenceConfidence)
readAlleleLikelihoods.addNonReferenceAllele(GATKVariantContextUtils.NON_REF_SYMBOLIC_ALLELE);
final GenotypesContext genotypes = calculateGLsForThisEvent( readAlleleLikelihoods, mergedVC );
final VariantContext call = calculateGenotypes(null,null,null,null,new VariantContextBuilder(mergedVC).genotypes(genotypes).make(), calculationModel, false,null);
if( call != null ) {
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap_annotations = ( configuration.USE_FILTERED_READ_MAP_FOR_ANNOTATIONS ? alleleReadMap :
convertHaplotypeReadMapToAlleleReadMap( haplotypeReadMap, alleleMapper, emptyDownSamplingMap, genomeLocParser, null ) );
if (emitReferenceConfidence) addMiscellaneousAllele(alleleReadMap_annotations);
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = addFilteredReadList(genomeLocParser, alleleReadMap_annotations, perSampleFilteredReadList, call, true);
VariantContext annotatedCall = annotationEngine.annotateContextForActiveRegion(tracker, stratifiedReadMap, call);
readAlleleLikelihoods = prepareReadAlleleLikelihoodsForAnnotation(readLikelihoods, perSampleFilteredReadList,
genomeLocParser, emitReferenceConfidence, alleleMapper, readAlleleLikelihoods, call);
VariantContext annotatedCall = annotationEngine.annotateContextForActiveRegion(tracker,readAlleleLikelihoods, call);
if( call.getAlleles().size() != mergedVC.getAlleles().size() )
annotatedCall = GATKVariantContextUtils.reverseTrimAlleles(annotatedCall);
annotatedCall = GATKVariantContextUtils.reverseTrimAlleles(annotatedCall);
// maintain the set of all called haplotypes
for ( final Allele calledAllele : call.getAlleles() ) {
@ -282,64 +285,91 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
return new CalledHaplotypes(returnCalls, calledHaplotypes);
}
/**
* Add the <NON_REF> allele
* @param stratifiedReadMap target per-read-allele-likelihood-map.
*/
public static Map<String, PerReadAlleleLikelihoodMap> addMiscellaneousAllele(final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
final Allele miscellanoeusAllele = GATKVariantContextUtils.NON_REF_SYMBOLIC_ALLELE;
for (Map.Entry<String, PerReadAlleleLikelihoodMap> perSample : stratifiedReadMap.entrySet()) {
for (Map.Entry<GATKSAMRecord, Map<Allele, Double>> perRead : perSample.getValue().getLikelihoodReadMap().entrySet()) {
double bestLikelihood = Double.NEGATIVE_INFINITY;
double secondBestLikelihood = Double.NEGATIVE_INFINITY;
for (Map.Entry<Allele,Double> perAllele : perRead.getValue().entrySet()) {
final double value = perAllele.getValue();
if (value > bestLikelihood) {
secondBestLikelihood = bestLikelihood;
bestLikelihood = value;
} else if (value < bestLikelihood && value > secondBestLikelihood) {
secondBestLikelihood = value;
}
}
final double miscellanousLikelihood = Double.isInfinite(secondBestLikelihood) ? bestLikelihood : secondBestLikelihood;
perSample.getValue().add(perRead.getKey(),miscellanoeusAllele,miscellanousLikelihood);
}
// Builds the read-likelihoods collection to use for annotation considering user arguments and the collection
// used for genotyping.
private ReadLikelihoods<Allele> prepareReadAlleleLikelihoodsForAnnotation(
final ReadLikelihoods<Haplotype> readHaplotypeLikelihoods,
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
final GenomeLocParser genomeLocParser,
final boolean emitReferenceConfidence,
final Map<Allele, List<Haplotype>> alleleMapper,
final ReadLikelihoods<Allele> readAlleleLikelihoodsForGenotyping,
final VariantContext call) {
final ReadLikelihoods<Allele> readAlleleLikelihoodsForAnnotations;
final GenomeLoc loc = genomeLocParser.createGenomeLoc(call);
// We can reuse for annotation the likelihood for genotyping as long as there is no contamination filtering
// or the user want to use the contamination filtered set for annotations.
// Otherwise (else part) we need to do it again.
if (configuration.USE_FILTERED_READ_MAP_FOR_ANNOTATIONS || !configuration.isSampleContaminationPresent()) {
readAlleleLikelihoodsForAnnotations = readAlleleLikelihoodsForGenotyping;
readAlleleLikelihoodsForAnnotations.filterToOnlyOverlappingUnclippedReads(loc);
} else {
readAlleleLikelihoodsForAnnotations = readHaplotypeLikelihoods.marginalize(alleleMapper, loc);
if (emitReferenceConfidence)
readAlleleLikelihoodsForAnnotations.addNonReferenceAllele(
GATKVariantContextUtils.NON_REF_SYMBOLIC_ALLELE);
}
return stratifiedReadMap;
// Skim the filtered map based on the location so that we do not add filtered reads that are going to be removed
// right after a few lines of code below.
final Map<String, List<GATKSAMRecord>> overlappingFilteredReads = overlappingFilteredReads(perSampleFilteredReadList, loc);
readAlleleLikelihoodsForAnnotations.addReads(overlappingFilteredReads,0);
return readAlleleLikelihoodsForAnnotations;
}
/**
 * Subsets each sample's filtered-read list to the reads whose unclipped extent overlaps a location.
 *
 * @param perSampleFilteredReadList sample to reads filtered out after assembly; lists may be null or empty.
 * @param loc the location the reads must overlap.
 * @return never {@code null}; samples with no overlapping filtered reads are omitted from the result.
 */
private Map<String, List<GATKSAMRecord>> overlappingFilteredReads(final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList, final GenomeLoc loc) {
    final Map<String,List<GATKSAMRecord>> overlappingFilteredReads = new HashMap<>(perSampleFilteredReadList.size());
    for (final Map.Entry<String,List<GATKSAMRecord>> sampleEntry : perSampleFilteredReadList.entrySet()) {
        final List<GATKSAMRecord> originalList = sampleEntry.getValue();
        if (originalList == null || originalList.isEmpty())
            continue;
        final List<GATKSAMRecord> newList = new ArrayList<>(originalList.size());
        for (final GATKSAMRecord read : originalList) {
            // unclipped overlap: soft-clipped bases count towards the read's span
            if (ReadLikelihoods.unclippedReadOverlapsRegion(read, loc))
                newList.add(read);
        }
        if (!newList.isEmpty())
            overlappingFilteredReads.put(sampleEntry.getKey(), newList);
    }
    return overlappingFilteredReads;
}
/**
* Go through the haplotypes we assembled, and decompose them into their constituent variant contexts
*
* @param haplotypes the list of haplotypes we're working with
* @param haplotypeReadMap map from samples -> the per read allele likelihoods
* @param readLikelihoods map from samples -> the per read allele likelihoods
* @param ref the reference bases (over the same interval as the haplotypes)
* @param refLoc the span of the reference bases
* @param activeAllelesToGenotype alleles we want to ensure are scheduled for genotyping (GGA mode)
* @return never {@code null} but perhaps an empty list if there is no variants to report.
*/
private TreeSet<Integer> decomposeHaplotypesIntoVariantContexts(final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final ReadLikelihoods readLikelihoods,
final byte[] ref,
final GenomeLoc refLoc,
final List<VariantContext> activeAllelesToGenotype) {
final boolean in_GGA_mode = !activeAllelesToGenotype.isEmpty();
// Using the cigar from each called haplotype to figure out what events need to be written out in a VCF file
// Using the cigar from each called haplotype figure out what events need to be written out in a VCF file
final TreeSet<Integer> startPosKeySet = EventMap.buildEventMapsForHaplotypes(haplotypes, ref, refLoc, configuration.DEBUG);
if ( in_GGA_mode ) startPosKeySet.clear();
//cleanUpSymbolicUnassembledEvents( haplotypes ); // We don't make symbolic alleles so this isn't needed currently
if ( !in_GGA_mode ) {
// run the event merger if we're not in GGA mode
if (crossHaplotypeEventMerger == null)
throw new IllegalStateException(" no variant merger was provided at set-up when needed in GGA mode");
final boolean mergedAnything = crossHaplotypeEventMerger.merge(haplotypes, haplotypeReadMap, startPosKeySet, ref, refLoc);
final boolean mergedAnything = crossHaplotypeEventMerger.merge(haplotypes, readLikelihoods, startPosKeySet, ref, refLoc);
if ( mergedAnything )
cleanUpSymbolicUnassembledEvents( haplotypes ); // the newly created merged events could be overlapping the unassembled events
}
else {
} else {
startPosKeySet.clear();
for( final VariantContext compVC : activeAllelesToGenotype ) {
startPosKeySet.add( compVC.getStart() );
}
@ -406,65 +436,31 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
/**
* For a particular event described in inputVC, form PL vector for each sample by looking into allele read map and filling likelihood matrix for each allele
* @param alleleReadMap Allele map describing mapping from reads to alleles and corresponding likelihoods
* @param readLikelihoods Allele map describing mapping from reads to alleles and corresponding likelihoods
* @param mergedVC Input VC with event to genotype
* @return GenotypesContext object wrapping genotype objects with PLs
*/
@Requires({"alleleReadMap!= null", "mergedVC != null"})
@Requires({"readLikelihoods!= null", "mergedVC != null"})
@Ensures("result != null")
private GenotypesContext calculateGLsForThisEvent( final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap, final VariantContext mergedVC ) {
final GenotypesContext genotypes = GenotypesContext.create(alleleReadMap.size());
private GenotypesContext calculateGLsForThisEvent( final ReadLikelihoods<Allele> readLikelihoods, final VariantContext mergedVC ) {
final GenotypesContext genotypes = GenotypesContext.create(readLikelihoods.sampleCount());
// Grab the genotype likelihoods from the appropriate places in the haplotype likelihood matrix -- calculation performed independently per sample
for( final String sample : alleleReadMap.keySet() ) {
for (final String sample : readLikelihoods.samples() ) {
final int numHaplotypes = mergedVC.getAlleles().size();
final double[] genotypeLikelihoods = new double[numHaplotypes * (numHaplotypes+1) / 2];
final double[][] haplotypeLikelihoodMatrix = PairHMMLikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, alleleReadMap, mergedVC.getAlleles(), true);
final double[][] haplotypeLikelihoodMatrix = PairHMMLikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, readLikelihoods, mergedVC.getAlleles(), true);
int glIndex = 0;
for( int iii = 0; iii < numHaplotypes; iii++ ) {
for( int jjj = 0; jjj <= iii; jjj++ ) {
genotypeLikelihoods[glIndex++] = haplotypeLikelihoodMatrix[iii][jjj]; // for example: AA,AB,BB,AC,BC,CC
}
}
logger.debug(" Likelihoods for sample " + sample + " : " + Arrays.toString(genotypeLikelihoods));
genotypes.add(new GenotypeBuilder(sample).alleles(NO_CALL).PL(genotypeLikelihoods).make());
}
return genotypes;
}
/**
 * Adds back, for each sample, the reads that were filtered out before likelihood calculation,
 * giving them a likelihood of 0.0 for every called allele so downstream annotators can see them.
 * <p>
 * When {@code requireOverlap} is true only reads whose unclipped extent overlaps the call
 * location are included — both for the likelihood-bearing reads and the filtered ones.
 * </p>
 *
 * @param parser genome-loc parser used to compute read and call spans.
 * @param perSampleReadMap sample to per-read allele likelihoods computed for genotyping.
 * @param perSampleFilteredReadList sample to reads filtered after assembly; a sample may be absent.
 * @param call the called variant context whose alleles the filtered reads are attached to.
 * @param requireOverlap whether to restrict output to reads overlapping the call location.
 * @return a new map, never {@code null}, with one entry per sample in {@code perSampleReadMap}.
 */
private static Map<String, PerReadAlleleLikelihoodMap> addFilteredReadList(final GenomeLocParser parser,
                                                                           final Map<String, PerReadAlleleLikelihoodMap> perSampleReadMap,
                                                                           final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
                                                                           final VariantContext call,
                                                                           final boolean requireOverlap) {
    final Map<String, PerReadAlleleLikelihoodMap> returnMap = new LinkedHashMap<>();
    final GenomeLoc callLoc = ( requireOverlap ? parser.createGenomeLoc(call) : null );
    for( final Map.Entry<String, PerReadAlleleLikelihoodMap> sample : perSampleReadMap.entrySet() ) {
        final PerReadAlleleLikelihoodMap likelihoodMap = new PerReadAlleleLikelihoodMap();
        for( final Map.Entry<GATKSAMRecord,Map<Allele,Double>> mapEntry : sample.getValue().getLikelihoodReadMap().entrySet() ) {
            // only count the read if it overlaps the event, otherwise it is not added to the output read list at all
            if( !requireOverlap || callLoc.overlapsP(parser.createGenomeLocUnclipped(mapEntry.getKey())) ) {
                for( final Map.Entry<Allele,Double> alleleDoubleEntry : mapEntry.getValue().entrySet() ) {
                    likelihoodMap.add(mapEntry.getKey(), alleleDoubleEntry.getKey(), alleleDoubleEntry.getValue());
                }
            }
        }
        // add all filtered reads to the NO_CALL list because they weren't given any likelihoods.
        // Null-guard: a sample present in the likelihood map may have no entry in the filtered-read
        // map; the unguarded get() would NPE in the for-each below.
        final List<GATKSAMRecord> filteredReads = perSampleFilteredReadList.get(sample.getKey());
        if( filteredReads != null ) {
            for( final GATKSAMRecord read : filteredReads ) {
                // only count the read if it overlaps the event, otherwise it is not added to the output read list at all
                if( !requireOverlap || callLoc.overlapsP(parser.createGenomeLocUnclipped(read)) ) {
                    for( final Allele allele : call.getAlleles() ) {
                        likelihoodMap.add(read, allele, 0.0);
                    }
                }
            }
        }
        returnMap.put(sample.getKey(), likelihoodMap);
    }
    return returnMap;
}
/**
* Removes symbolic events from list of haplotypes
* @param haplotypes Input/output list of haplotypes, before/after removal
@ -490,48 +486,6 @@ public class HaplotypeCallerGenotypingEngine extends GenotypingEngine<HaplotypeC
haplotypes.removeAll(haplotypesToRemove);
}
/**
 * The reads, partitioned by haplotype, must now be partitioned by alleles.
 * That is, some alleles are supported by multiple haplotypes and we marginalize over them by taking the max likelihood.
 * In addition we subset down to only reads which overlap the alleles (plus a small extension)
 * @param haplotypeReadMap Map from reads -> (haplotypes, likelihoods)
 * @param alleleMapper Map from alleles -> list of haplotypes which support that allele
 * @param perSampleDownsamplingFraction Map from samples -> downsampling fraction
 * @param genomeLocParser a genome loc parser
 * @param eventsToGenotype the alleles to genotype in a single VariantContext, will be null if we don't want to require overlap
 * @return Map from reads -> (alleles, likelihoods)
 */
protected Map<String, PerReadAlleleLikelihoodMap> convertHaplotypeReadMapToAlleleReadMap( final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final Map<Allele, List<Haplotype>> alleleMapper,
final Map<String,Double> perSampleDownsamplingFraction,
final GenomeLocParser genomeLocParser,
final VariantContext eventsToGenotype) {
// When an event is supplied, precompute its span once; a null callLoc means "no overlap required".
final GenomeLoc callLoc = ( eventsToGenotype != null ? genomeLocParser.createGenomeLoc(eventsToGenotype) : null );
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap = new LinkedHashMap<>();
for( final Map.Entry<String, PerReadAlleleLikelihoodMap> haplotypeReadMapEntry : haplotypeReadMap.entrySet() ) { // for each sample
final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = new PerReadAlleleLikelihoodMap();
for( final Map.Entry<Allele, List<Haplotype>> alleleMapperEntry : alleleMapper.entrySet() ) { // for each output allele
final List<Haplotype> mappedHaplotypes = alleleMapperEntry.getValue();
for( final Map.Entry<GATKSAMRecord, Map<Allele,Double>> readEntry : haplotypeReadMapEntry.getValue().getLikelihoodReadMap().entrySet() ) { // for each read
// Overlap test uses the unclipped read span padded by ALLELE_EXTENSION bases on each side.
if( eventsToGenotype == null || callLoc.overlapsP(genomeLocParser.createPaddedGenomeLoc(genomeLocParser.createGenomeLocUnclipped(readEntry.getKey()), ALLELE_EXTENSION)) ) { // make sure the read overlaps
// Marginalize: the allele's likelihood is the max over the haplotypes that support it.
double maxLikelihood = Double.NEGATIVE_INFINITY;
for( final Map.Entry<Allele,Double> alleleDoubleEntry : readEntry.getValue().entrySet() ) { // for each input allele
// Wrapping the input allele in a Haplotype makes contains() compare by base string.
if( mappedHaplotypes.contains( new Haplotype(alleleDoubleEntry.getKey())) ) { // exact match of haplotype base string
maxLikelihood = Math.max( maxLikelihood, alleleDoubleEntry.getValue() );
}
}
// NOTE(review): if no haplotype matched, the read is still added with -Infinity — TODO confirm intended.
perReadAlleleLikelihoodMap.add(readEntry.getKey(), alleleMapperEntry.getKey(), maxLikelihood);
}
}
}
// NOTE(review): get() unboxes to double — assumes every sample has a fraction (callers pass a
// DefaultHashMap); a plain map missing the sample would NPE here. Verify against callers.
perReadAlleleLikelihoodMap.performPerAlleleDownsampling(perSampleDownsamplingFraction.get(haplotypeReadMapEntry.getKey())); // perform contamination downsampling
alleleReadMap.put(haplotypeReadMapEntry.getKey(), perReadAlleleLikelihoodMap);
}
return alleleReadMap;
}
protected static Map<Allele, List<Haplotype>> createAlleleMapper( final Map<VariantContext, Allele> mergeMap, final Map<Event, List<Haplotype>> eventMap ) {
final Map<Allele, List<Haplotype>> alleleMapper = new LinkedHashMap<>();
for( final Map.Entry<VariantContext, Allele> entry : mergeMap.entrySet() ) {

View File

@ -53,7 +53,7 @@ import org.apache.log4j.Logger;
import org.broadinstitute.gatk.utils.MathUtils;
import org.broadinstitute.gatk.utils.QualityUtils;
import org.broadinstitute.gatk.utils.exceptions.UserException;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.pairhmm.*;
import org.broadinstitute.gatk.utils.recalibration.covariates.RepeatCovariate;
@ -73,6 +73,7 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
public static final byte BASE_QUALITY_SCORE_THRESHOLD = (byte) 18; // Base quals less than this value are squashed down to min possible qual
private final byte constantGCP;
private final double log10globalReadMismappingRate;
private final PairHMM.HMM_IMPLEMENTATION hmmType;
@ -177,40 +178,6 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
pairHMMThreadLocal.get().close();
}
/**
 * Writes one space-separated debug line (haplotype bases, read bases, FASTQ-encoded base /
 * insertion / deletion / gap-continuation qualities, and the log10 likelihood) to the
 * likelihoods debug stream. No-op unless likelihood-file output is enabled.
 */
private void writeDebugLikelihoods(final GATKSAMRecord processedRead, final Haplotype haplotype, final double log10l){
    if ( !WRITE_LIKELIHOODS_TO_FILE )
        return;
    likelihoodsStream.printf("%s %s %s %s %s %s %f%n",
            haplotype.getBaseString(),
            new String(processedRead.getReadBases() ),
            SAMUtils.phredToFastq(processedRead.getBaseQualities() ),
            SAMUtils.phredToFastq(processedRead.getBaseInsertionQualities() ),
            SAMUtils.phredToFastq(processedRead.getBaseDeletionQualities() ),
            SAMUtils.phredToFastq(constantGCP),
            log10l);
}
/**
 * Builds a map associating each haplotype's reference-allele representation with the
 * haplotype itself, preserving the input ordering.
 */
private Map<Allele, Haplotype> createAlleleMap(List<Haplotype> haplotypes){
    final Map<Allele, Haplotype> haplotypeByAllele = new LinkedHashMap<>(haplotypes.size());
    for ( final Haplotype hap : haplotypes )
        haplotypeByAllele.put(Allele.create(hap, true), hap);
    return haplotypeByAllele;
}
/**
 * Builds, for each read, a gap-continuation-penalty (GCP) array with one entry per read base,
 * every entry set to the engine-wide constant {@code constantGCP}.
 *
 * @param reads the reads to build GCP arrays for.
 * @return a map from each read to its constant-filled GCP array, in input order.
 */
private Map<GATKSAMRecord, byte[]> fillGCPArrays(final List<GATKSAMRecord> reads){
    final Map<GATKSAMRecord, byte[]> gcpArrayByRead = new LinkedHashMap<>(reads.size());
    for (final GATKSAMRecord read : reads){
        final byte[] gcpArray = new byte[read.getReadBases().length];
        Arrays.fill(gcpArray, constantGCP); // Is there a way to derive empirical estimates for this from the data?
        gcpArrayByRead.put(read, gcpArray);
    }
    return gcpArrayByRead;
}
private void capMinimumReadQualities(GATKSAMRecord read, byte[] readQuals, byte[] readInsQuals, byte[] readDelQuals) {
for( int kkk = 0; kkk < readQuals.length; kkk++ ) {
readQuals[kkk] = (byte) Math.min( 0xff & readQuals[kkk], read.getMappingQuality()); // cap base quality by mapping quality, as in UG
@ -229,9 +196,9 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
* @return processedReads. A new list of reads, in the same order, whose qualities have been altered by PCR error model and minimal quality thresholding
*/
private List<GATKSAMRecord> modifyReadQualities(final List<GATKSAMRecord> reads) {
List<GATKSAMRecord> processedReads = new LinkedList<>();
for ( GATKSAMRecord read : reads ) {
final List<GATKSAMRecord> result = new ArrayList<>(reads.size());
for (final GATKSAMRecord read : reads) {
final byte[] readBases = read.getReadBases();
// NOTE -- must clone anything that gets modified here so we don't screw up future uses of the read
@ -244,71 +211,9 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
// Create a new copy of the read and sets its base qualities to the modified versions.
// Pack this into a new list for return
final GATKSAMRecord processedRead = GATKSAMRecord.createQualityModifiedRead(read, readBases, readQuals, readInsQuals, readDelQuals);
processedReads.add(processedRead);
result.add(GATKSAMRecord.createQualityModifiedRead(read, readBases, readQuals, readInsQuals, readDelQuals));
}
return processedReads;
}
/**
* Post-processing of the read/allele likelihoods.
*
* We send quality-capped reads to the pairHMM for evaluation, and it returns a map containing these capped reads.
* We wish to return a map containing the original, unmodified reads.
*
* At the same time, we want to effectively set a lower cap on the reference score, based on the global mis-mapping rate.
* This protects us from the case where the assembly has produced haplotypes
* that are very divergent from reference, but are supported by only one read. In effect
* we capping how badly scoring the reference can be for any read by the chance that the read
* itself just doesn't belong here
*
* @param perReadAlleleLikelihoodMap the original map returned by the PairHMM. Contains the processed reads, the haplotype Alleles, and their log10ls
* @param reads Our original, unmodified reads
* @param processedReads Reads whose minimum base,insertion,deletion qualities have been capped; these were actually used to derive log10ls
* @param alleleHaplotypeMap The map associating the Allele and Haplotype versions of each haplotype
*
* @return processedReadAlleleLikelihoodMap; a new PRALM containing the original reads, and their haplotype log10ls including capped reference log10ls
*/
private PerReadAlleleLikelihoodMap capReferenceHaplotypeLikelihoods(PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap, List<GATKSAMRecord> reads, List<GATKSAMRecord> processedReads, Map<Allele, Haplotype> alleleHaplotypeMap){
// a new read/allele map, to contain the uncapped reads, haplotypes, and potentially the capped reference log10ls
final PerReadAlleleLikelihoodMap processedReadAlleleLikelihoodMap = new PerReadAlleleLikelihoodMap();
final int numReads = reads.size();
for (int readIndex = 0; readIndex < numReads; readIndex++) {
// Get the original and quality-modified read from their respective lists
// Note that this requires both lists to have reads in the same order
final GATKSAMRecord originalRead = reads.get(readIndex);
final GATKSAMRecord processedRead = processedReads.get(readIndex);
double bestNonReflog10L = Double.NEGATIVE_INFINITY;
for ( final Allele allele : alleleHaplotypeMap.keySet() ) {
final double log10l = perReadAlleleLikelihoodMap.getLikelihoodAssociatedWithReadAndAllele(processedRead, allele);
final Haplotype haplotype = alleleHaplotypeMap.get(allele);
if ( haplotype.isNonReference() )
bestNonReflog10L = Math.max(bestNonReflog10L, log10l);
writeDebugLikelihoods(processedRead, haplotype, log10l);
// add the ORIGINAL (non-capped) read to the final map, along with the current haplotype and associated log10l
processedReadAlleleLikelihoodMap.add(originalRead, allele, log10l);
}
// ensure that any haplotype is no worse than the best non-ref haplotype minus the global
// mismapping rate. This protects us from the case where the assembly has produced haplotypes
// that are very divergent from reference, but are supported by only one read. In effect
// we cap how badly any haplotype can score for any read, based on the chance that the read
// itself just doesn't belong here
final double worstAllowedLog10l = bestNonReflog10L + log10globalReadMismappingRate;
for ( final Allele allele : alleleHaplotypeMap.keySet() ) {
final double log10l = perReadAlleleLikelihoodMap.getLikelihoodAssociatedWithReadAndAllele(processedRead, allele);
if( log10l < worstAllowedLog10l ) {
processedReadAlleleLikelihoodMap.add(originalRead, allele, worstAllowedLog10l);
}
}
}
return processedReadAlleleLikelihoodMap;
return result;
}
/**
@ -343,85 +248,109 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
pairHMMThreadLocal.get().finalizeRegion();
}
@Override
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods( final AssemblyResultSet assemblyResultSet, final Map<String, List<GATKSAMRecord>> perSampleReadList ) {
public ReadLikelihoods<Haplotype> computeReadLikelihoods( final AssemblyResultSet assemblyResultSet, final List<String> samples, final Map<String, List<GATKSAMRecord>> perSampleReadList ) {
final List<Haplotype> haplotypes = assemblyResultSet.getHaplotypeList();
// configure the HMM
initializePairHMM(haplotypes, perSampleReadList);
// Add likelihoods for each sample's reads to our stratifiedReadMap
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = new LinkedHashMap<>();
for( final Map.Entry<String, List<GATKSAMRecord>> sampleEntry : perSampleReadList.entrySet() ) {
// evaluate the likelihood of the reads given those haplotypes
final PerReadAlleleLikelihoodMap map = computeReadLikelihoods(haplotypes, sampleEntry.getValue());
map.filterPoorlyModelledReads(EXPECTED_ERROR_RATE_PER_BASE);
stratifiedReadMap.put(sampleEntry.getKey(), map);
// Add likelihoods for each sample's reads to our result
final ReadLikelihoods<Haplotype> result = new ReadLikelihoods<>(samples, haplotypes, perSampleReadList);
final int sampleCount = result.sampleCount();
for (int s = 0; s < sampleCount; s++) {
final ReadLikelihoods.Matrix<Haplotype> sampleLikelihoods = result.sampleMatrix(s);
computeReadLikelihoods(sampleLikelihoods);
}
//Used mostly by the JNI implementation(s) to free arrays
finalizePairHMM();
return stratifiedReadMap;
result.normalizeLikelihoods(false, log10globalReadMismappingRate);
result.filterPoorlyModeledReads(EXPECTED_ERROR_RATE_PER_BASE);
finalizePairHMM();
return result;
}
private PerReadAlleleLikelihoodMap computeReadLikelihoods( final List<Haplotype> haplotypes, final List<GATKSAMRecord> reads) {
private void computeReadLikelihoods( final ReadLikelihoods.Matrix<Haplotype> likelihoods) {
// Modify the read qualities by applying the PCR error model and capping the minimum base,insertion,deletion qualities
List<GATKSAMRecord> processedReads = modifyReadQualities(reads);
// Get alleles corresponding to our haplotypes
Map<Allele, Haplotype> alleleHaplotypeMap = createAlleleMap(haplotypes);
// Get an array containing the constantGCP for each read in our modified read list
Map<GATKSAMRecord,byte[]> GCPArrayMap = fillGCPArrays(processedReads);
final List<GATKSAMRecord> processedReads = modifyReadQualities(likelihoods.reads());
// Run the PairHMM to calculate the log10 likelihood of each (processed) read's arising from each haplotype
PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = pairHMMThreadLocal.get().computeLikelihoods(processedReads, alleleHaplotypeMap, GCPArrayMap);
pairHMMThreadLocal.get().computeLikelihoods(likelihoods,processedReads,constantGCP);
// Generate a new map containing the original, unmodified reads, and with minimal reference haplotype log10ls determined from the global mis-mapping rate
if (WRITE_LIKELIHOODS_TO_FILE)
writeDebugLikelihoods(likelihoods);
}
return capReferenceHaplotypeLikelihoods(perReadAlleleLikelihoodMap, reads, processedReads, alleleHaplotypeMap);
private void writeDebugLikelihoods(final ReadLikelihoods.Matrix<Haplotype> likelihoods) {
final List<GATKSAMRecord> reads = likelihoods.reads();
final List<Haplotype> haplotypes = likelihoods.alleles();
final int haplotypeCount = haplotypes.size();
final int readCount = reads.size();
for (int r = 0; r < readCount; r++)
for (int a = 0; a < haplotypeCount; a++)
writeDebugLikelihoods(reads.get(r),haplotypes.get(a),likelihoods.get(a,r));
likelihoodsStream.flush();
}
private void writeDebugLikelihoods(final GATKSAMRecord processedRead, final Haplotype haplotype, final double log10l){
likelihoodsStream.printf("%s %s %s %s %s %s %f%n",
haplotype.getBaseString(),
new String(processedRead.getReadBases() ),
SAMUtils.phredToFastq(processedRead.getBaseQualities()),
SAMUtils.phredToFastq(processedRead.getBaseInsertionQualities() ),
SAMUtils.phredToFastq(processedRead.getBaseDeletionQualities() ),
SAMUtils.phredToFastq(constantGCP),
log10l);
}
@Requires({"alleleOrdering.size() > 0"})
@Ensures({"result.length == result[0].length", "result.length == alleleOrdering.size()"})
@Deprecated
public static double[][] computeDiploidHaplotypeLikelihoods( final String sample,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap,
final List<Allele> alleleOrdering,
final ReadLikelihoods readLikelihoods,
final List alleleOrdering,
final boolean normalize ) {
return computeDiploidHaplotypeLikelihoods(Collections.singleton(sample), stratifiedReadMap, alleleOrdering, normalize);
return computeDiploidHaplotypeLikelihoods(Collections.singleton(sample), readLikelihoods, alleleOrdering, normalize);
}
@Requires({"alleleOrdering.size() > 0"})
@Ensures({"result.length == result[0].length", "result.length == alleleOrdering.size()"})
public static double[][] computeDiploidHaplotypeLikelihoods( final Set<String> samples,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap,
final List<Allele> alleleOrdering,
@Deprecated
private static double[][] computeDiploidHaplotypeLikelihoods( final Set<String> samples,
final ReadLikelihoods readLikelihoods,
final List alleleOrdering,
final boolean normalize) {
final int numHaplotypes = alleleOrdering.size();
final int[] alleleIndices = new int[alleleOrdering.size()];
final ListIterator alleleIterator = alleleOrdering.listIterator();
int nextAlleleIndex = 0;
while (alleleIterator.hasNext())
if ((alleleIndices[nextAlleleIndex++] = readLikelihoods.alleleIndex((Allele) alleleIterator.next())) == -1)
throw new IllegalArgumentException("allele " + alleleIterator.previous() + " not found in likelihood collection ");
final double[][] haplotypeLikelihoodMatrix = new double[numHaplotypes][numHaplotypes];
for( int iii = 0; iii < numHaplotypes; iii++ ) {
Arrays.fill(haplotypeLikelihoodMatrix[iii], Double.NEGATIVE_INFINITY);
}
// compute the diploid haplotype likelihoods
for( int iii = 0; iii < numHaplotypes; iii++ ) {
final Allele iii_allele = alleleOrdering.get(iii);
for( int jjj = 0; jjj <= iii; jjj++ ) {
final Allele jjj_allele = alleleOrdering.get(jjj);
double haplotypeLikelihood = 0.0;
for( final String sample : samples ) {
for( final Map.Entry<GATKSAMRecord, Map<Allele,Double>> entry : stratifiedReadMap.get(sample).getLikelihoodReadMap().entrySet() ) {
// Compute log10(10^x1/2 + 10^x2/2) = log10(10^x1+10^x2)-log10(2)
// First term is approximated by Jacobian log with table lookup.
haplotypeLikelihood += ( MathUtils.approximateLog10SumLog10(entry.getValue().get(iii_allele), entry.getValue().get(jjj_allele)) + MathUtils.LOG_ONE_HALF );
for(final String sample : samples) {
final int sampleIndex = readLikelihoods.sampleIndex(sample);
if (sampleIndex == -1)
throw new IllegalArgumentException("the sample provided is not in the likelihood collection");
final ReadLikelihoods.Matrix sampleLikelihoods = readLikelihoods.sampleMatrix(sampleIndex);
final int sampleReadCount = readLikelihoods.sampleReadCount(sampleIndex);
for( int iii = 0; iii < numHaplotypes; iii++ ) {
final int iii_allele = alleleIndices[iii];
for( int jjj = 0; jjj <= iii; jjj++ ) {
final int jjj_allele = alleleIndices[jjj];
double haplotypeLikelihood = 0.0;
for (int r = 0; r < sampleReadCount; r++) {
final double value = MathUtils.approximateLog10SumLog10(sampleLikelihoods.get(iii_allele,r),
sampleLikelihoods.get(jjj_allele,r)) + MathUtils.LOG_ONE_HALF;
haplotypeLikelihood += value;
}
haplotypeLikelihoodMatrix[iii][jjj] += haplotypeLikelihood;
}
haplotypeLikelihoodMatrix[iii][jjj] = haplotypeLikelihood;
}
}
@ -431,6 +360,7 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
@Requires({"likelihoodMatrix.length == likelihoodMatrix[0].length"})
@Ensures({"result.length == result[0].length", "result.length == likelihoodMatrix.length"})
@Deprecated
protected static double[][] normalizeDiploidLikelihoodMatrixFromLog10( final double[][] likelihoodMatrix ) {
final int numHaplotypes = likelihoodMatrix.length;
double[] genotypeLikelihoods = new double[numHaplotypes*(numHaplotypes+1)/2];
@ -450,131 +380,6 @@ public class PairHMMLikelihoodCalculationEngine implements ReadLikelihoodCalcula
return likelihoodMatrix;
}
// --------------------------------------------------------------------------------
//
// System to compute the best N haplotypes for genotyping
//
// --------------------------------------------------------------------------------
//
// /**
// * Helper function for selectBestHaplotypesFromEachSample that updates the score of haplotype haplotypeAsAllele
// * @param map an annoying map object that moves us between the allele and haplotype representation
// * @param haplotypeAsAllele the allele version of the haplotype
// * @return the haplotype version, with its score incremented by 1 if its non-reference
// */
// private Haplotype updateSelectHaplotype(final Map<Allele, Haplotype> map, final Allele haplotypeAsAllele) {
// final Haplotype h = map.get(haplotypeAsAllele); // TODO -- fixme when haplotypes are properly generic
// if ( h.isNonReference() ) h.setScore(h.getScore() + 1); // ref is already at max value
// return h;
// }
//
// /**
// * Take the best N haplotypes and return them as a list
// *
// * Only considers the haplotypes selectedHaplotypes that were actually selected by at least one sample
// * as it's preferred haplotype. Takes the best N haplotypes from selectedHaplotypes in decreasing
// * order of score (so higher score haplotypes are preferred). The N we take is determined by
// *
// * N = min(2 * nSamples + 1, maxNumHaplotypesInPopulation)
// *
// * where 2 * nSamples is the number of chromosomes in 2 samples including the reference, and our workload is
// * bounded by maxNumHaplotypesInPopulation as that number can grow without bound
// *
// * @param selectedHaplotypes a non-null set of haplotypes with scores >= 1
// * @param nSamples the number of samples used to select the haplotypes
// * @param maxNumHaplotypesInPopulation the maximum number of haplotypes we're allowed to take, regardless of nSamples
// * @return a list of N or fewer haplotypes, with the reference haplotype first
// */
// private List<Haplotype> selectBestHaplotypesAccordingToScore(final Set<Haplotype> selectedHaplotypes, final int nSamples, final int maxNumHaplotypesInPopulation) {
// final List<Haplotype> selectedHaplotypesList = new ArrayList<>(selectedHaplotypes);
// Collections.sort(selectedHaplotypesList, new HaplotypeScoreComparator());
// final int numChromosomesInSamplesPlusRef = 2 * nSamples + 1;
// final int haplotypesToKeep = Math.min(numChromosomesInSamplesPlusRef, maxNumHaplotypesInPopulation);
// final List<Haplotype> bestHaplotypes = selectedHaplotypesList.size() <= haplotypesToKeep ? selectedHaplotypesList : selectedHaplotypesList.subList(0, haplotypesToKeep);
// if ( bestHaplotypes.get(0).isNonReference()) throw new IllegalStateException("BUG: reference haplotype should be first in list");
// return bestHaplotypes;
// }
//
// /**
// * Select the best haplotypes for genotyping the samples in stratifiedReadMap
// *
// * Selects these haplotypes by counting up how often each haplotype is selected as one of the most likely
// * haplotypes per sample. What this means is that each sample computes the diploid genotype likelihoods for
// * all possible pairs of haplotypes, and the pair with the highest likelihood has each haplotype each get
// * one extra count for each haplotype (so hom-var haplotypes get two counts). After performing this calculation
// * the best N haplotypes are selected (@see #selectBestHaplotypesAccordingToScore) and a list of the
// * haplotypes in order of score are returned, ensuring that at least one of the haplotypes is reference.
// *
// * @param haplotypes a list of all haplotypes we're considering
// * @param stratifiedReadMap a map from sample -> read likelihoods per haplotype
// * @param maxNumHaplotypesInPopulation the max. number of haplotypes we can select from haplotypes
// * @return a list of selected haplotypes with size <= maxNumHaplotypesInPopulation
// */
// public List<Haplotype> selectBestHaplotypesFromEachSample(final List<Haplotype> haplotypes, final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap, final int maxNumHaplotypesInPopulation) {
// if ( haplotypes.size() < 2 ) throw new IllegalArgumentException("Must have at least 2 haplotypes to consider but only have " + haplotypes);
//
// if ( haplotypes.size() == 2 ) return haplotypes; // fast path -- we'll always want to use 2 haplotypes
//
// // all of the haplotypes that at least one sample called as one of the most likely
// final Set<Haplotype> selectedHaplotypes = new HashSet<>();
// selectedHaplotypes.add(findReferenceHaplotype(haplotypes)); // ref is always one of the selected
//
// // our annoying map from allele -> haplotype
// final Map<Allele, Haplotype> allele2Haplotype = new HashMap<>();
// for ( final Haplotype h : haplotypes ) {
// h.setScore(h.isReference() ? Double.MAX_VALUE : 0.0); // set all of the scores to 0 (lowest value) for all non-ref haplotypes
// allele2Haplotype.put(Allele.create(h, h.isReference()), h);
// }
//
// // for each sample, compute the most likely pair of haplotypes
// for ( final Map.Entry<String, PerReadAlleleLikelihoodMap> entry : stratifiedReadMap.entrySet() ) {
// // get the two most likely haplotypes under a diploid model for this sample
// final MostLikelyAllele mla = entry.getValue().getMostLikelyDiploidAlleles();
//
// if ( mla != null ) { // there was something to evaluate in this sample
// // note that there must be at least 2 haplotypes
// final Haplotype best = updateSelectHaplotype(allele2Haplotype, mla.getMostLikelyAllele());
// final Haplotype second = updateSelectHaplotype(allele2Haplotype, mla.getSecondMostLikelyAllele());
//
//// if ( DEBUG ) {
//// logger.info("Chose haplotypes " + best + " " + best.getCigar() + " and " + second + " " + second.getCigar() + " for sample " + entry.getKey());
//// }
//
// // add these two haplotypes to the set of haplotypes that have been selected
// selectedHaplotypes.add(best);
// selectedHaplotypes.add(second);
//
// // we've already selected all of our haplotypes, and we don't need to prune them down
// if ( selectedHaplotypes.size() == haplotypes.size() && haplotypes.size() < maxNumHaplotypesInPopulation )
// break;
// }
// }
//
// // take the best N haplotypes forward, in order of the number of samples that choose them
// final int nSamples = stratifiedReadMap.size();
// final List<Haplotype> bestHaplotypes = selectBestHaplotypesAccordingToScore(selectedHaplotypes, nSamples, maxNumHaplotypesInPopulation);
//
// if ( DEBUG ) {
// logger.info("Chose " + (bestHaplotypes.size() - 1) + " alternate haplotypes to genotype in all samples.");
// for ( final Haplotype h : bestHaplotypes ) {
// logger.info("\tHaplotype " + h.getCigar() + " selected for further genotyping" + (h.isNonReference() ? " found " + (int)h.getScore() + " haplotypes" : " as ref haplotype"));
// }
// }
// return bestHaplotypes;
// }
//
// /**
// * Find the haplotype that isRef(), or @throw ReviewedGATKException if one isn't found
// * @param haplotypes non-null list of haplotypes
// * @return the reference haplotype
// */
// private static Haplotype findReferenceHaplotype( final List<Haplotype> haplotypes ) {
// for( final Haplotype h : haplotypes ) {
// if( h.isReference() ) return h;
// }
// throw new ReviewedGATKException( "No reference haplotype found in the list of haplotypes!" );
// }
// --------------------------------------------------------------------------------
//
// Experimental attempts at PCR error rate modeling

View File

@ -1,52 +1,53 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.gatk.tools.walkers.haplotypecaller;
import org.broadinstitute.gatk.engine.GenomeAnalysisEngine;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import htsjdk.variant.variantcontext.Allele;
@ -62,21 +63,25 @@ import java.util.Random;
public class RandomLikelihoodCalculationEngine implements ReadLikelihoodCalculationEngine {
@Override
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods(final AssemblyResultSet assemblyResultSet, final Map<String, List<GATKSAMRecord>> reads) {
public ReadLikelihoods computeReadLikelihoods(final AssemblyResultSet assemblyResultSet,
final List<String> samples,
final Map<String, List<GATKSAMRecord>> reads) {
final List<Haplotype> haplotypes = assemblyResultSet.getHaplotypeList();
final Map<String,PerReadAlleleLikelihoodMap> result = new HashMap<>(reads.size());
final ReadLikelihoods result = new ReadLikelihoods(samples, haplotypes, reads);
final Map<Haplotype,Allele> alleles = new HashMap<>(haplotypes.size());
for (final Haplotype haplotype : haplotypes)
alleles.put(haplotype,Allele.create(haplotype,false));
final Random rnd = GenomeAnalysisEngine.getRandomGenerator();
for (final String sample : reads.keySet()) {
final PerReadAlleleLikelihoodMap pralm = new PerReadAlleleLikelihoodMap();
for (final GATKSAMRecord read : reads.get(sample))
for (final Haplotype haplotype : haplotypes )
pralm.add(read,alleles.get(haplotype),-Math.abs(rnd.nextDouble()));
result.put(sample,pralm);
final int sampleCount = samples.size();
final int alleleCount = alleles.size();
for (int i = 0; i < sampleCount; i++) {
final List<GATKSAMRecord> sampleReads = result.sampleReads(i);
final int readCount = sampleReads.size();
final ReadLikelihoods.Matrix<Haplotype> sampleLikelihoods = result.sampleMatrix(i);
for (int a = 0; a < alleleCount; a++)
for (int r = 0; r < readCount; r++)
sampleLikelihoods.set(a,r,-Math.abs(rnd.nextDouble()));
}
return result;
}

View File

@ -46,7 +46,8 @@
package org.broadinstitute.gatk.tools.walkers.haplotypecaller;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import java.util.List;
@ -87,7 +88,7 @@ public interface ReadLikelihoodCalculationEngine {
* @return never {@code null}, and with at least one entry for each input sample (the keys in {@code perSampleReadList}).
* The value maps can be potentially empty though.
*/
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods(AssemblyResultSet assemblyResultSet,
public ReadLikelihoods<Haplotype> computeReadLikelihoods(AssemblyResultSet assemblyResultSet, List<String> samples,
Map<String, List<GATKSAMRecord>> perSampleReadList);
public void close();

View File

@ -47,13 +47,16 @@
package org.broadinstitute.gatk.tools.walkers.haplotypecaller;
import htsjdk.samtools.*;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.vcf.VCFHeaderLine;
import htsjdk.variant.vcf.VCFSimpleHeaderLine;
import org.broadinstitute.gatk.engine.contexts.AlignmentContext;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.GenomeLocParser;
import org.broadinstitute.gatk.utils.MathUtils;
import org.broadinstitute.gatk.utils.QualityUtils;
import org.broadinstitute.gatk.utils.activeregion.ActiveRegion;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.locusiterator.LocusIteratorByState;
import org.broadinstitute.gatk.utils.pileup.PileupElement;
@ -62,9 +65,6 @@ import org.broadinstitute.gatk.utils.pileup.ReadBackedPileupImpl;
import org.broadinstitute.gatk.utils.sam.AlignmentUtils;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.vcf.VCFHeaderLine;
import htsjdk.variant.vcf.VCFSimpleHeaderLine;
import java.io.File;
import java.util.*;
@ -86,6 +86,7 @@ public class ReferenceConfidenceModel {
private final GenomeLocParser genomeLocParser;
private final Set<String> samples;
private final SAMFileHeader header; // TODO -- really shouldn't depend on this
private final int indelInformativeDepthIndelSize;
private final static boolean WRITE_DEBUGGING_BAM = false;
@ -113,6 +114,7 @@ public class ReferenceConfidenceModel {
this.genomeLocParser = genomeLocParser;
this.samples = samples;
this.header = header;
this.indelInformativeDepthIndelSize = indelInformativeDepthIndelSize;
if ( WRITE_DEBUGGING_BAM ) {
@ -156,10 +158,10 @@ public class ReferenceConfidenceModel {
*
* @param refHaplotype the reference haplotype, used to get the reference bases across activeRegion.getLoc()
* @param calledHaplotypes a list of haplotypes that segregate in this region, for realignment of the reads in the
* stratifiedReadMap, corresponding to each reads best haplotype. Must contain the refHaplotype.
* readLikelihoods, corresponding to each read's best haplotype. Must contain the refHaplotype.
* @param paddedReferenceLoc the location of refHaplotype (which might be larger than activeRegion.getLoc())
* @param activeRegion the active region we want to get the reference confidence over
* @param stratifiedReadMap a map from a single sample to its PerReadAlleleLikelihoodMap for each haplotype in calledHaplotypes
* @param readLikelihoods a map from a single sample to its PerReadAlleleLikelihoodMap for each haplotype in calledHaplotypes
* @param variantCalls calls made in this region. The return result will contain any variant call in this list in the
* correct order by genomic position, and any variant in this list will stop us emitting a ref confidence
* under any position it covers (for snps and insertions that is 1 bp, but for deletions its the entire ref span)
@ -170,22 +172,22 @@ public class ReferenceConfidenceModel {
final Collection<Haplotype> calledHaplotypes,
final GenomeLoc paddedReferenceLoc,
final ActiveRegion activeRegion,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap,
final ReadLikelihoods<Haplotype> readLikelihoods,
final List<VariantContext> variantCalls) {
if ( refHaplotype == null ) throw new IllegalArgumentException("refHaplotype cannot be null");
if ( calledHaplotypes == null ) throw new IllegalArgumentException("calledHaplotypes cannot be null");
if ( !calledHaplotypes.contains(refHaplotype)) throw new IllegalArgumentException("calledHaplotypes must contain the refHaplotype");
if ( paddedReferenceLoc == null ) throw new IllegalArgumentException("paddedReferenceLoc cannot be null");
if ( activeRegion == null ) throw new IllegalArgumentException("activeRegion cannot be null");
if ( stratifiedReadMap == null ) throw new IllegalArgumentException("stratifiedReadMap cannot be null");
if ( stratifiedReadMap.size() != 1 ) throw new IllegalArgumentException("stratifiedReadMap must contain exactly one sample but it contained " + stratifiedReadMap.size());
if ( readLikelihoods == null ) throw new IllegalArgumentException("readLikelihoods cannot be null");
if ( readLikelihoods.sampleCount() != 1 ) throw new IllegalArgumentException("readLikelihoods must contain exactly one sample but it contained " + readLikelihoods.sampleCount());
if ( refHaplotype.length() != activeRegion.getExtendedLoc().size() ) throw new IllegalArgumentException("refHaplotype " + refHaplotype.length() + " and activeRegion location size " + activeRegion.getLocation().size() + " are different");
final GenomeLoc refSpan = activeRegion.getLocation();
final List<ReadBackedPileup> refPileups = getPileupsOverReference(refHaplotype, calledHaplotypes, paddedReferenceLoc, activeRegion, refSpan, stratifiedReadMap);
final List<ReadBackedPileup> refPileups = getPileupsOverReference(refHaplotype, calledHaplotypes, paddedReferenceLoc, activeRegion, refSpan, readLikelihoods);
final byte[] ref = refHaplotype.getBases();
final List<VariantContext> results = new ArrayList<>(refSpan.size());
final String sampleName = stratifiedReadMap.keySet().iterator().next();
final String sampleName = readLikelihoods.sample(0);
final int globalRefOffset = refSpan.getStart() - activeRegion.getExtendedLoc().getStart();
for ( final ReadBackedPileup pileup : refPileups ) {
@ -311,15 +313,15 @@ public class ReferenceConfidenceModel {
final GenomeLoc paddedReferenceLoc,
final ActiveRegion activeRegion,
final GenomeLoc activeRegionSpan,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
final ReadLikelihoods<Haplotype> readLikelihoods) {
if ( refHaplotype == null ) throw new IllegalArgumentException("refHaplotype cannot be null");
if ( calledHaplotypes == null ) throw new IllegalArgumentException("calledHaplotypes cannot be null");
if ( !calledHaplotypes.contains(refHaplotype)) throw new IllegalArgumentException("calledHaplotypes must contain the refHaplotype");
if ( paddedReferenceLoc == null ) throw new IllegalArgumentException("paddedReferenceLoc cannot be null");
if ( activeRegion == null ) throw new IllegalArgumentException("activeRegion cannot be null");
if ( stratifiedReadMap == null ) throw new IllegalArgumentException("stratifiedReadMap cannot be null");
if ( stratifiedReadMap.size() != 1 ) throw new IllegalArgumentException("stratifiedReadMap must contain exactly one sample but it contained " + stratifiedReadMap.size());
if ( readLikelihoods == null ) throw new IllegalArgumentException("readLikelihoods cannot be null");
if ( readLikelihoods.sampleCount() != 1 ) throw new IllegalArgumentException("readLikelihoods must contain exactly one sample but it contained " + readLikelihoods.sampleCount());
final List<GATKSAMRecord> reads = activeRegion.getReads();

View File

@ -47,11 +47,10 @@
package org.broadinstitute.gatk.utils.haplotype;
import com.google.java.contract.Requires;
import htsjdk.variant.variantcontext.VariantContext;
import org.broadinstitute.gatk.tools.walkers.haplotypecaller.PairHMMLikelihoodCalculationEngine;
import org.broadinstitute.gatk.utils.MathUtils;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.VariantContext;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import java.util.*;
@ -66,7 +65,7 @@ import java.util.*;
*/
public class HaplotypeLDCalculator {
private final List<Haplotype> haplotypes;
private final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap;
private final ReadLikelihoods<Haplotype> readLikelihoods;
private List<Map<Haplotype, Double>> haplotypeLikelihoodsPerSample = null;
// linear contingency table with table[0] == [0][0], table[1] = [0][1], table[2] = [1][0], table[3] = [1][1]
@ -75,14 +74,15 @@ public class HaplotypeLDCalculator {
/**
* For testing
*/
@SuppressWarnings("unchecked")
protected HaplotypeLDCalculator() {
haplotypes = Collections.emptyList();
haplotypeReadMap = Collections.emptyMap();
readLikelihoods = new ReadLikelihoods<>((List<String>)Collections.EMPTY_LIST, (List<Haplotype>)Collections.EMPTY_LIST, Collections.EMPTY_MAP);
}
public HaplotypeLDCalculator(List<Haplotype> haplotypes, Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap) {
public HaplotypeLDCalculator(final List<Haplotype> haplotypes, final ReadLikelihoods<Haplotype> haplotypeReadMap) {
this.haplotypes = haplotypes;
this.haplotypeReadMap = haplotypeReadMap;
this.readLikelihoods = haplotypeReadMap;
}
/**
@ -94,13 +94,13 @@ public class HaplotypeLDCalculator {
private void buildHaplotypeLikelihoodsPerSampleIfNecessary() {
if ( haplotypeLikelihoodsPerSample == null ) {
// do the lazy computation
final Set<String> samples = haplotypeReadMap.keySet();
haplotypeLikelihoodsPerSample = new LinkedList<Map<Haplotype, Double>>();
final Set<String> samples = new LinkedHashSet<>(readLikelihoods.samples());
haplotypeLikelihoodsPerSample = new LinkedList<>();
for( final String sample : samples ) {
final Map<Haplotype, Double> map = new HashMap<Haplotype, Double>(haplotypes.size());
final Map<Haplotype, Double> map = new HashMap<>(haplotypes.size());
for( final Haplotype h : haplotypes ) {
// count up the co-occurrences of the events for the R^2 calculation
final double haplotypeLikelihood = PairHMMLikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, haplotypeReadMap, Collections.singletonList(Allele.create(h, true)), false)[0][0];
final double haplotypeLikelihood = PairHMMLikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, readLikelihoods, Collections.singletonList(h), false)[0][0];
map.put(h, haplotypeLikelihood);
}
haplotypeLikelihoodsPerSample.add(map);
@ -162,7 +162,7 @@ public class HaplotypeLDCalculator {
*
* The probability is just p11_22 / (p11_22 + p hets)
*
* @table linear contigency table with table[0] == [0][0], table[1] = [0][1], table[2] = [1][0], table[3] = [1][1]
* @param table linear contingency table with table[0] == [0][0], table[1] = [0][1], table[2] = [1][0], table[3] = [1][1]
* doesn't have to be normalized as this function does the normalization internally
* @return the real space probability that the data is phased
*/

View File

@ -49,12 +49,15 @@ package org.broadinstitute.gatk.utils.haplotype;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import htsjdk.variant.variantcontext.Allele;
import htsjdk.variant.variantcontext.VariantContext;
import htsjdk.variant.variantcontext.VariantContextBuilder;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import java.util.*;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.TreeSet;
/**
* Merges VariantContexts in a series of haplotypes according to their pairwise LD
@ -94,19 +97,19 @@ public class LDMerger extends MergeVariantsAcrossHaplotypes {
* Merge as many events among the haplotypes as possible based on pairwise LD among variants
*
* @param haplotypes a list of haplotypes whose events we want to merge
* @param haplotypeReadMap map from sample name -> read likelihoods for each haplotype
* @param readLikelihoods map from sample name -> read likelihoods for each haplotype
* @param startPosKeySet a set of starting positions of all events among the haplotypes
* @param ref the reference bases
* @param refLoc the span of the reference bases
*/
@Override
public boolean merge( final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final ReadLikelihoods<Haplotype> readLikelihoods,
final TreeSet<Integer> startPosKeySet,
final byte[] ref,
final GenomeLoc refLoc ) {
if ( haplotypes == null ) throw new IllegalArgumentException("haplotypes cannot be null");
if ( haplotypeReadMap == null ) throw new IllegalArgumentException("haplotypeReadMap cannot be null");
if ( readLikelihoods == null ) throw new IllegalArgumentException("readLikelihoods cannot be null");
if ( startPosKeySet == null ) throw new IllegalArgumentException("startPosKeySet cannot be null");
if ( ref == null ) throw new IllegalArgumentException("ref cannot be null");
if ( refLoc == null ) throw new IllegalArgumentException("refLoc cannot be null");
@ -114,8 +117,8 @@ public class LDMerger extends MergeVariantsAcrossHaplotypes {
if( startPosKeySet.size() <= 1 ) { return false; }
final int nSamples = haplotypeReadMap.keySet().size();
final HaplotypeLDCalculator r2Calculator = new HaplotypeLDCalculator(haplotypes, haplotypeReadMap);
final int nSamples = readLikelihoods.sampleCount();
final HaplotypeLDCalculator r2Calculator = new HaplotypeLDCalculator(haplotypes, readLikelihoods);
boolean somethingWasMerged = false;
boolean mapWasUpdated = true;
while( mapWasUpdated ) {
@ -207,7 +210,7 @@ public class LDMerger extends MergeVariantsAcrossHaplotypes {
* @param haplotypes our haplotypes
* @param thisStart the starting position of the first event to merge
* @param nextStart the starting position of the next event to merge
* @return
* @return never {@code null}.
*/
private LDMergeData getPairOfEventsToMerge(final List<Haplotype> haplotypes, final int thisStart, final int nextStart) {
final LDMergeData mergeData = new LDMergeData();

View File

@ -47,10 +47,9 @@
package org.broadinstitute.gatk.utils.haplotype;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
/**
@ -63,14 +62,14 @@ public class MergeVariantsAcrossHaplotypes {
* Merge variants across the haplotypes, updating the haplotype event maps and startPos set as appropriate
*
* @param haplotypes a list of haplotypes whose events we want to merge
* @param haplotypeReadMap map from sample name -> read likelihoods for each haplotype
* @param readLikelihoods map from sample name -> read likelihoods for each haplotype
* @param startPosKeySet a set of starting positions of all events among the haplotypes
* @param ref the reference bases
* @param refLoc the span of the reference bases
* @return true if anything was merged
*/
public boolean merge( final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final ReadLikelihoods<Haplotype> readLikelihoods,
final TreeSet<Integer> startPosKeySet,
final byte[] ref,
final GenomeLoc refLoc ) {

View File

@ -47,13 +47,13 @@
package org.broadinstitute.gatk.utils.haplotypeBAMWriter;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import htsjdk.variant.variantcontext.Allele;
import java.util.*;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
/**
* A haplotype bam writer that writes out all haplotypes as reads and then
@ -66,6 +66,7 @@ import java.util.*;
* Time: 1:50 PM
*/
class AllHaplotypeBAMWriter extends HaplotypeBAMWriter {
/**
 * Creates a writer that emits every haplotype (called or not) as reads to the given destination.
 *
 * @param destination sink for the haplotype and realigned reads; passed straight to the superclass.
 */
public AllHaplotypeBAMWriter(final ReadDestination destination) {
    super(destination);
}
@ -78,13 +79,11 @@ class AllHaplotypeBAMWriter extends HaplotypeBAMWriter {
final GenomeLoc paddedReferenceLoc,
final Collection<Haplotype> bestHaplotypes,
final Set<Haplotype> calledHaplotypes,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
final ReadLikelihoods<Haplotype> readLikelihoods) {
writeHaplotypesAsReads(haplotypes, new HashSet<>(bestHaplotypes), paddedReferenceLoc);
for ( final PerReadAlleleLikelihoodMap readAlleleLikelihoodMap : stratifiedReadMap.values() ) {
for( final GATKSAMRecord read : readAlleleLikelihoodMap.getLikelihoodReadMap().keySet() ) {
final int sampleCount = readLikelihoods.sampleCount();
for (int s = 0; s < sampleCount; s++)
for (final GATKSAMRecord read : readLikelihoods.sampleReads(s))
writeReadAgainstHaplotype(read);
}
}
}
}

View File

@ -47,15 +47,11 @@
package org.broadinstitute.gatk.utils.haplotypeBAMWriter;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import htsjdk.variant.variantcontext.Allele;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
@ -82,16 +78,15 @@ class CalledHaplotypeBAMWriter extends HaplotypeBAMWriter {
final GenomeLoc paddedReferenceLoc,
final Collection<Haplotype> bestHaplotypes,
final Set<Haplotype> calledHaplotypes,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
final ReadLikelihoods<Haplotype> readLikelihoods) {
if ( calledHaplotypes.isEmpty() ) // only write out called haplotypes
return;
writeHaplotypesAsReads(calledHaplotypes, calledHaplotypes, paddedReferenceLoc);
for ( final PerReadAlleleLikelihoodMap readAlleleLikelihoodMap : stratifiedReadMap.values() ) {
for ( final GATKSAMRecord read : readAlleleLikelihoodMap.getLikelihoodReadMap().keySet() ) {
final int sampleCount = readLikelihoods.sampleCount();
for (int s = 0; s < sampleCount; s++)
for (final GATKSAMRecord read : readLikelihoods.sampleReads(s))
writeReadAgainstHaplotype(read);
}
}
}
}

View File

@ -46,22 +46,18 @@
package org.broadinstitute.gatk.utils.haplotypeBAMWriter;
import htsjdk.samtools.Cigar;
import htsjdk.samtools.SAMFileHeader;
import htsjdk.samtools.SAMTag;
import org.broadinstitute.gatk.engine.io.GATKSAMFileWriter;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.Utils;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.AlignmentUtils;
import org.broadinstitute.gatk.utils.sam.CigarUtils;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import org.broadinstitute.gatk.utils.smithwaterman.SWPairwiseAlignment;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
@ -155,17 +151,17 @@ public abstract class HaplotypeBAMWriter {
* @param paddedReferenceLoc the span of the based reference here
* @param bestHaplotypes a list of the best (a subset of all) haplotypes that actually went forward into genotyping
* @param calledHaplotypes a list of the haplotypes at where actually called as non-reference
* @param stratifiedReadMap a map from sample -> likelihoods for each read for each of the best haplotypes
* @param readLikelihoods a map from sample -> likelihoods for each read for each of the best haplotypes
*/
public abstract void writeReadsAlignedToHaplotypes(final Collection<Haplotype> haplotypes,
final GenomeLoc paddedReferenceLoc,
final Collection<Haplotype> bestHaplotypes,
final Set<Haplotype> calledHaplotypes,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap);
final ReadLikelihoods<Haplotype> readLikelihoods);
public void writeReadsAlignedToHaplotypes(final Collection<Haplotype> haplotypes,
final GenomeLoc paddedReferenceLoc,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap) {
final ReadLikelihoods<Haplotype> stratifiedReadMap) {
writeReadsAlignedToHaplotypes(haplotypes, paddedReferenceLoc, haplotypes, new HashSet<>(haplotypes), stratifiedReadMap);
}
@ -210,7 +206,7 @@ public abstract class HaplotypeBAMWriter {
record.setCigar(AlignmentUtils.consolidateCigar(haplotype.getCigar()));
record.setMappingQuality(isAmongBestHaplotypes ? 60 : 0);
record.setReadName("HC" + uniqueNameCounter++);
record.setAttribute(AlignmentUtils.HAPLOTYPE_TAG, haplotype.hashCode());
record.setAttribute(AlignmentUtils.HAPLOTYPE_TAG,haplotype.hashCode());
record.setReadUnmappedFlag(false);
record.setReferenceIndex(paddedRefLoc.getContigIndex());
record.setAttribute(SAMTag.RG.toString(), READ_GROUP_ID);

View File

@ -25,13 +25,16 @@
package org.broadinstitute.gatk.engine.downsampling;
import org.broadinstitute.gatk.utils.*;
import org.apache.log4j.Logger;
import org.broadinstitute.gatk.utils.BaseUtils;
import org.broadinstitute.gatk.utils.MathUtils;
import org.broadinstitute.gatk.utils.collections.DefaultHashMap;
import org.broadinstitute.gatk.utils.exceptions.GATKException;
import org.broadinstitute.gatk.utils.exceptions.UserException;
import org.broadinstitute.gatk.utils.pileup.*;
import org.broadinstitute.gatk.utils.pileup.PileupElement;
import org.broadinstitute.gatk.utils.pileup.ReadBackedPileup;
import org.broadinstitute.gatk.utils.pileup.ReadBackedPileupImpl;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import org.broadinstitute.gatk.utils.BaseUtils;
import org.broadinstitute.gatk.utils.text.XReadLines;
import htsjdk.variant.variantcontext.Allele;
@ -39,8 +42,6 @@ import java.io.File;
import java.io.IOException;
import java.util.*;
import org.apache.log4j.Logger;
public class AlleleBiasedDownsamplingUtils {
// define this class so that we can use Java generics below
@ -216,7 +217,7 @@ public class AlleleBiasedDownsamplingUtils {
* @param downsamplingFraction the fraction of total reads to remove per allele
* @return list of reads TO REMOVE from allele biased down-sampling
*/
public static List<GATKSAMRecord> selectAlleleBiasedReads(final Map<Allele, List<GATKSAMRecord>> alleleReadMap, final double downsamplingFraction) {
public static <A extends Allele> List<GATKSAMRecord> selectAlleleBiasedReads(final Map<A, List<GATKSAMRecord>> alleleReadMap, final double downsamplingFraction) {
int totalReads = 0;
for ( final List<GATKSAMRecord> reads : alleleReadMap.values() )
totalReads += reads.size();

View File

@ -27,17 +27,18 @@ package org.broadinstitute.gatk.tools.walkers.annotator;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import org.broadinstitute.gatk.utils.commandline.RodBinding;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.vcf.*;
import org.broadinstitute.gatk.engine.GenomeAnalysisEngine;
import org.broadinstitute.gatk.engine.contexts.AlignmentContext;
import org.broadinstitute.gatk.engine.contexts.ReferenceContext;
import org.broadinstitute.gatk.engine.refdata.RefMetaDataTracker;
import org.broadinstitute.gatk.tools.walkers.annotator.interfaces.*;
import org.broadinstitute.gatk.utils.GenomeLoc;
import org.broadinstitute.gatk.utils.commandline.RodBinding;
import org.broadinstitute.gatk.utils.exceptions.UserException;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import htsjdk.variant.variantcontext.*;
import htsjdk.variant.vcf.*;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import java.util.*;
@ -204,6 +205,15 @@ public class VariantAnnotatorEngine {
return annotateDBs(tracker, annotated);
}
/**
 * Annotates a variant context called in an active region, given per-sample read likelihoods.
 *
 * Bridges the new {@link ReadLikelihoods} representation to the legacy
 * {@code Map<String, PerReadAlleleLikelihoodMap>} based overload so that the annotation
 * interfaces do not need to change.
 *
 * @param tracker the meta-data tracker for the region.
 * @param readLikelihoods per-sample, per-allele read likelihoods for this call.
 * @param vc the variant context to annotate.
 * @return the annotated variant context (result of the legacy overload).
 */
public VariantContext annotateContextForActiveRegion(final RefMetaDataTracker tracker,
                                                     final ReadLikelihoods<Allele> readLikelihoods,
                                                     final VariantContext vc) {
    //TODO we transform the read-likelihoods into the previous Map^2 representation for the sake of not changing the annotation interface.
    //TODO should we change those interfaces?
    final Map<String, PerReadAlleleLikelihoodMap> annotationLikelihoods = readLikelihoods.toPerReadAlleleLikelihoodMap();
    return annotateContextForActiveRegion(tracker, annotationLikelihoods, vc);
}
public VariantContext annotateContextForActiveRegion(final RefMetaDataTracker tracker,
final Map<String, PerReadAlleleLikelihoodMap> perReadAlleleLikelihoodMap,
final VariantContext vc) {

View File

@ -29,6 +29,7 @@ import com.google.java.contract.Requires;
import org.apache.log4j.Logger;
import org.broadinstitute.gatk.utils.MathUtils;
import org.broadinstitute.gatk.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.gatk.utils.genotyper.ReadLikelihoods;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
import org.broadinstitute.gatk.utils.sam.GATKSAMRecord;
import htsjdk.variant.variantcontext.Allele;
@ -125,7 +126,27 @@ public abstract class PairHMM {
* @param readMaxLength the max length of reads we want to use with this PairHMM
*/
public void initialize( final List<Haplotype> haplotypes, final Map<String, List<GATKSAMRecord>> perSampleReadList, final int readMaxLength, final int haplotypeMaxLength ) {
initialize(readMaxLength, haplotypeMaxLength);
initialize(readMaxLength, haplotypeMaxLength);
}
/**
 * Returns the length of the longest read among {@code reads}.
 *
 * @param reads the reads to examine; may be empty.
 * @return 0 when no reads are given, otherwise the maximum read length.
 */
private int findMaxReadLength(final GATKSAMRecord ... reads) {
    int longest = 0;
    for (final GATKSAMRecord candidate : reads)
        longest = Math.max(longest, candidate.getReadLength());
    return longest;
}
/**
 * Returns the length of the longest allele in {@code alleles}.
 *
 * @param alleles the alleles to examine; may be empty.
 * @return 0 when the list is empty, otherwise the maximum allele length.
 */
private int findMaxAlleleLength(final List<? extends Allele> alleles) {
    int longest = 0;
    for (final Allele candidate : alleles)
        longest = Math.max(longest, candidate.length());
    return longest;
}
protected int findMaxReadLength(final List<GATKSAMRecord> reads) {
@ -147,6 +168,63 @@ public abstract class PairHMM {
return listMaxHaplotypeLength;
}
/**
 * Given a list of reads and haplotypes, for every read compute the total probability of said read arising from
 * each haplotype given base substitution, insertion, and deletion probabilities.
 *
 * Results are written into {@code likelihoods} (position [a][r] holds the likelihood of
 * {@code processedReads[r]} conditional to {@code likelihoods.alleles()[a]}) and also mirrored,
 * read-major, into the internal {@code mLikelihoodArray}.
 *
 * @param likelihoods where to store the likelihoods where position [a][r] is reserved for the likelihood of {@code reads[r]}
 * conditional to {@code alleles[a]}.
 * @param processedReads reads to analyze
 * @param constantGCP constant penalty for gap continuations; applied uniformly to every base of each read.
 */
public void computeLikelihoods(final ReadLikelihoods.Matrix<Haplotype> likelihoods, final List<GATKSAMRecord> processedReads, final byte constantGCP) {
    if(doProfiling)
        startTime = System.nanoTime();

    // (re)initialize the pairHMM only if necessary: resize only when the incoming reads/haplotypes
    // exceed the currently allocated matrix dimensions.
    final int readMaxLength = findMaxReadLength(processedReads);
    final int haplotypeMaxLength = findMaxAlleleLength(likelihoods.alleles());
    if (!initialized || readMaxLength > maxReadLength || haplotypeMaxLength > maxHaplotypeLength)
        initialize(readMaxLength, haplotypeMaxLength);

    final int readCount = processedReads.size();
    final List<Haplotype> alleles = likelihoods.alleles();
    final int alleleCount = alleles.size();
    // Flat read-major copy of all likelihoods; idx advances once per (read, allele) pair.
    mLikelihoodArray = new double[readCount * alleleCount];
    int idx = 0;
    int readIndex = 0;
    for(final GATKSAMRecord read : processedReads){
        final int readLength = read.getReadLength();
        final byte[] readBases = read.getReadBases();
        final byte[] readQuals = read.getBaseQualities();
        final byte[] readInsQuals = read.getBaseInsertionQualities();
        final byte[] readDelQuals = read.getBaseDeletionQualities();
        final byte[] overallGCP = new byte[readLength];
        Arrays.fill(overallGCP,constantGCP);

        // peek at the next haplotype in the list (necessary to get nextHaplotypeBases, which is required for caching in the array implementation)
        // NOTE(review): isFirstHaplotype is constant true here, unlike the deprecated overload where it
        // flagged only the first haplotype per read — confirm this is intended for the caching protocol.
        final boolean isFirstHaplotype = true;
        for (int a = 0; a < alleleCount; a++) {
            final Allele allele = alleles.get(a);
            final byte[] alleleBases = allele.getBases();
            // nextAlleleBases is null for the last allele; otherwise the following allele's bases.
            final byte[] nextAlleleBases = a == alleles.size() - 1 ? null : alleles.get(a + 1).getBases();
            final double lk = computeReadLikelihoodGivenHaplotypeLog10(alleleBases,
                    readBases, readQuals, readInsQuals, readDelQuals, overallGCP, isFirstHaplotype, nextAlleleBases);
            likelihoods.set(a, readIndex, lk);
            mLikelihoodArray[idx++] = lk;
        }
        readIndex++;
    }

    if(doProfiling) {
        threadLocalPairHMMComputeTimeDiff = (System.nanoTime() - startTime);
        //synchronized(doProfiling)
        {
            pairHMMComputeTime += threadLocalPairHMMComputeTimeDiff;
        }
    }
}
/**
* Given a list of reads and haplotypes, for every read compute the total probability of said read arising from
* each haplotype given base substitution, insertion, and deletion probabilities.
@ -156,6 +234,7 @@ public abstract class PairHMM {
* @param GCPArrayMap Each read is associated with an array containing the gap continuation penalties for use in the model. Length of each GCP-array must match that of its read.
* @return a PerReadAlleleLikelihoodMap containing each read, haplotype-allele, and the log10 probability of
* said read coming from the said haplotype under the provided error model
* @deprecated
*/
public PerReadAlleleLikelihoodMap computeLikelihoods(final List<GATKSAMRecord> reads, final Map<Allele, Haplotype> alleleHaplotypeMap, final Map<GATKSAMRecord, byte[]> GCPArrayMap) {
if(doProfiling)
@ -178,7 +257,7 @@ public abstract class PairHMM {
// peek at the next haplotype in the list (necessary to get nextHaplotypeBases, which is required for caching in the array implementation)
byte[] currentHaplotypeBases = null;
boolean isFirstHaplotype = true;
final boolean isFirstHaplotype = true;
Allele currentAllele = null;
double log10l;
//for (final Allele allele : alleleHaplotypeMap.keySet()){

View File

@ -31,7 +31,6 @@ import htsjdk.samtools.Cigar;
import htsjdk.samtools.CigarElement;
import htsjdk.samtools.CigarOperator;
import htsjdk.samtools.SAMRecord;
import org.broadinstitute.gatk.engine.GenomeAnalysisEngine;
import org.broadinstitute.gatk.utils.BaseUtils;
import org.broadinstitute.gatk.utils.exceptions.ReviewedGATKException;
import org.broadinstitute.gatk.utils.haplotype.Haplotype;
@ -75,7 +74,11 @@ public final class AlignmentUtils {
* @param haplotype the haplotype that the read should be aligned to, before aligning to the reference
* @param referenceStart the start of the reference that haplotype is aligned to. Provides global coordinate frame.
* @param isInformative true if the read is differentially informative for one of the haplotypes
* @return a GATKSAMRecord aligned to reference, or null if no meaningful alignment is possible
*
* @throws IllegalArgumentException if {@code originalRead} is {@code null} or {@code haplotype} is {@code null} or it
* does not have a Cigar or the {@code referenceStart} is invalid (less than 1).
*
* @return a GATKSAMRecord aligned to reference. Never {@code null}.
*/
public static GATKSAMRecord createReadAlignedToRef(final GATKSAMRecord originalRead,
final Haplotype haplotype,