Merge pull request #147 from broadinstitute/md_hc_symbolic_allele

Large number of fundamental improvements to the HaplotypeCaller
This commit is contained in:
Ryan Poplin 2013-04-08 10:29:07 -07:00
commit 0c2f795fa5
64 changed files with 3692 additions and 1085 deletions

View File

@ -235,7 +235,7 @@ public class SyntheticRead {
read.setReadBases(convertReadBases());
read.setMappingQuality((int) Math.ceil(mappingQuality / basesCountsQuals.size()));
read.setReadGroup(readGroupRecord);
read.setReducedReadCounts(convertBaseCounts());
read.setReducedReadCountsTag(convertBaseCounts());
if (hasIndelQualities) {
read.setBaseQualities(convertInsertionQualities(), EventType.BASE_INSERTION);

View File

@ -49,7 +49,7 @@ package org.broadinstitute.sting.gatk.walkers.genotyper;
import com.google.java.contract.Requires;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.walkers.indels.PairHMMIndelErrorModel;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;

View File

@ -49,7 +49,7 @@ package org.broadinstitute.sting.gatk.walkers.genotyper;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.walkers.genotyper.afcalc.ExactACset;
import org.broadinstitute.sting.gatk.walkers.indels.PairHMMIndelErrorModel;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.pileup.PileupElement;

View File

@ -53,6 +53,7 @@ import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.gatk.refdata.RefMetaDataTracker;
import org.broadinstitute.sting.gatk.walkers.indels.PairHMMIndelErrorModel;
import org.broadinstitute.sting.utils.*;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.variant.variantcontext.*;
import java.util.*;

View File

@ -55,7 +55,7 @@ import org.broadinstitute.sting.gatk.walkers.indels.PairHMMIndelErrorModel;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;

View File

@ -55,7 +55,8 @@ import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.*;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.exceptions.UserException;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
@ -94,22 +95,25 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
private final boolean debug;
private final boolean debugGraphTransformations;
private final int minKmer;
private final boolean allowCyclesInKmerGraphToGeneratePaths;
private final int onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms;
protected DeBruijnAssembler() {
this(false, -1, 11);
this(false, -1, 11, false);
}
public DeBruijnAssembler(final boolean debug,
final int debugGraphTransformations,
final int minKmer) {
final int minKmer,
final boolean allowCyclesInKmerGraphToGeneratePaths) {
super();
this.debug = debug;
this.debugGraphTransformations = debugGraphTransformations > 0;
this.onlyBuildKmersOfThisSizeWhenDebuggingGraphAlgorithms = debugGraphTransformations;
this.minKmer = minKmer;
this.allowCyclesInKmerGraphToGeneratePaths = allowCyclesInKmerGraphToGeneratePaths;
}
/**
@ -161,8 +165,9 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
if ( debugGraphTransformations ) graph.printGraph(new File("unpruned.dot"), pruneFactor);
if ( shouldErrorCorrectKmers() ) {
graph = errorCorrect(graph);
if ( debugGraphTransformations ) graph.printGraph(new File("errorCorrected.dot"), pruneFactor);
throw new UserException("Error correction no longer supported because of the " +
"incredibly naive way this was implemented. The command line argument remains because some" +
" future subsystem will actually go and error correct the reads");
}
final SeqGraph seqGraph = toSeqGraph(graph);
@ -185,6 +190,14 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
final SeqGraph seqGraph = deBruijnGraph.convertToSequenceGraph();
if ( debugGraphTransformations ) seqGraph.printGraph(new File("sequenceGraph.1.dot"), pruneFactor);
// TODO -- we need to come up with a consistent pruning algorithm. The current pruning algorithm
// TODO -- works well but it doesn't differentiate between an isolated chain that doesn't connect
// TODO -- to anything from one that actually has good support along the chain but just happens
// TODO -- to have a connection in the middle that has weight of < pruneFactor. Ultimately
// TODO -- the pruning algorithm really should be an error correction algorithm that knows more
// TODO -- about the structure of the data and can differentiate between an infrequent path but
// TODO -- without evidence against it (such as occurs when a region is hard to get any reads through)
// TODO -- from an error with lots of weight going along another similar path
// the very first thing we need to do is zip up the graph, or pruneGraph will be too aggressive
seqGraph.zipLinearChains();
if ( debugGraphTransformations ) seqGraph.printGraph(new File("sequenceGraph.2.zipped.dot"), pruneFactor);
@ -206,6 +219,16 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
return null;
seqGraph.removePathsNotConnectedToRef();
seqGraph.simplifyGraph();
if ( seqGraph.vertexSet().size() == 1 ) {
// we've perfectly assembled into a single reference haplotype, add an empty seq vertex to stop
// the code from blowing up.
// TODO -- ref properties should really be on the vertices, not the graph itself
final SeqVertex complete = seqGraph.vertexSet().iterator().next();
final SeqVertex dummy = new SeqVertex("");
seqGraph.addVertex(dummy);
seqGraph.addEdge(complete, dummy, new BaseEdge(true, 0));
}
if ( debugGraphTransformations ) seqGraph.printGraph(new File("sequenceGraph.5.final.dot"), pruneFactor);
return seqGraph;
@ -324,39 +347,6 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
return true;
}
/**
* Error correct the kmers in this graph, returning a new graph built from those error corrected kmers
* @return an error corrected version of this (freshly allocated graph) or simply this graph if for some reason
* we cannot actually do the error correction
*/
public DeBruijnGraph errorCorrect(final DeBruijnGraph graph) {
final KMerErrorCorrector corrector = new KMerErrorCorrector(graph.getKmerSize(), 1, 1, 5); // TODO -- should be static variables
// feed every kmer touched by an edge into the corrector, weighted by the edge's multiplicity
for( final BaseEdge edge : graph.edgeSet() ) {
// TODO -- need a cleaner way to deal with the ref weight
final int weight = edge.isRef() ? 1000 : edge.getMultiplicity();
corrector.addKmer(graph.getEdgeSource(edge).getSequence(), weight);
corrector.addKmer(graph.getEdgeTarget(edge).getSequence(), weight);
}
if ( ! corrector.computeErrorCorrectionMap() ) {
// the error correction wasn't possible, simply return this graph
return graph;
}
// rebuild the graph using the corrected kmers, skipping any edge whose kmers could not be corrected
final DeBruijnGraph correctedGraph = new DeBruijnGraph(graph.getKmerSize());
for( final BaseEdge edge : graph.edgeSet() ) {
final byte[] correctedSource = corrector.getErrorCorrectedKmer(graph.getEdgeSource(edge).getSequence());
final byte[] correctedTarget = corrector.getErrorCorrectedKmer(graph.getEdgeTarget(edge).getSequence());
if ( correctedSource != null && correctedTarget != null ) {
correctedGraph.addKmersToGraph(correctedSource, correctedTarget, edge.isRef(), edge.getMultiplicity());
}
}
return correctedGraph;
}
protected void printGraphs(final List<SeqGraph> graphs) {
final int writeFirstGraphWithSizeSmallerThan = 50;
@ -401,7 +391,12 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
}
for( final SeqGraph graph : graphs ) {
for ( final Path<SeqVertex> path : new KBestPaths<SeqVertex>().getKBestPaths(graph, NUM_BEST_PATHS_PER_KMER_GRAPH) ) {
final SeqVertex source = graph.getReferenceSourceVertex();
final SeqVertex sink = graph.getReferenceSinkVertex();
if ( source == null || sink == null ) throw new IllegalArgumentException("Both source and sink cannot be null but got " + source + " and sink " + sink + " for graph "+ graph);
final KBestPaths<SeqVertex> pathFinder = new KBestPaths<SeqVertex>(allowCyclesInKmerGraphToGeneratePaths);
for ( final Path<SeqVertex> path : pathFinder.getKBestPaths(graph, NUM_BEST_PATHS_PER_KMER_GRAPH, source, sink) ) {
// logger.info("Found path " + path);
Haplotype h = new Haplotype( path.getBases() );
if( !returnHaplotypes.contains(h) ) {
@ -432,7 +427,7 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
// for GGA mode, add the desired allele into the haplotype if it isn't already present
if( !activeAllelesToGenotype.isEmpty() ) {
final Map<Integer,VariantContext> eventMap = GenotypingEngine.generateVCsFromAlignment( h, h.getAlignmentStartHapwrtRef(), h.getCigar(), refWithPadding, h.getBases(), refLoc, "HCassembly" ); // BUGBUG: need to put this function in a shared place
final Map<Integer,VariantContext> eventMap = GenotypingEngine.generateVCsFromAlignment( h, refWithPadding, refLoc, "HCassembly" ); // BUGBUG: need to put this function in a shared place
for( final VariantContext compVC : activeAllelesToGenotype ) { // for GGA mode, add the desired allele into the haplotype if it isn't already present
final VariantContext vcOnHaplotype = eventMap.get(compVC.getStart());
@ -453,6 +448,9 @@ public class DeBruijnAssembler extends LocalAssemblyEngine {
}
}
// add genome locs to the haplotypes
for ( final Haplotype h : returnHaplotypes ) h.setGenomeLocation(activeRegionWindow);
if ( returnHaplotypes.size() < returnHaplotypes.size() )
logger.info("Found " + returnHaplotypes.size() + " candidate haplotypes of " + returnHaplotypes.size() + " possible combinations to evaluate every read against at " + refLoc);

View File

@ -48,19 +48,20 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.gatk.walkers.annotator.VariantAnnotatorEngine;
import org.broadinstitute.sting.gatk.walkers.genotyper.GenotypeLikelihoodsCalculationModel;
import org.broadinstitute.sting.gatk.walkers.genotyper.UnifiedGenotyperEngine;
import org.broadinstitute.sting.utils.*;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.haplotype.EventMap;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.haplotype.MergeVariantsAcrossHaplotypes;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.variant.GATKVariantContextUtils;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.variant.variantcontext.*;
import java.io.PrintStream;
@ -72,14 +73,17 @@ public class GenotypingEngine {
private final boolean DEBUG;
private final boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS;
private final static List<Allele> noCall = new ArrayList<Allele>(); // used to noCall all genotypes until the exact model is applied
private final static Allele SYMBOLIC_UNASSEMBLED_EVENT_ALLELE = Allele.create("<UNASSEMBLED_EVENT>", false);
private final VariantAnnotatorEngine annotationEngine;
private final MergeVariantsAcrossHaplotypes crossHaplotypeEventMerger;
public GenotypingEngine( final boolean DEBUG, final VariantAnnotatorEngine annotationEngine, final boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS ) {
public GenotypingEngine( final boolean DEBUG, final VariantAnnotatorEngine annotationEngine,
final boolean USE_FILTERED_READ_MAP_FOR_ANNOTATIONS,
final MergeVariantsAcrossHaplotypes crossHaplotypeEventMerger) {
this.DEBUG = DEBUG;
this.annotationEngine = annotationEngine;
this.USE_FILTERED_READ_MAP_FOR_ANNOTATIONS = USE_FILTERED_READ_MAP_FOR_ANNOTATIONS;
noCall.add(Allele.NO_CALL);
this.crossHaplotypeEventMerger = crossHaplotypeEventMerger;
}
/**
@ -119,9 +123,10 @@ public class GenotypingEngine {
* Main entry point of class - given a particular set of haplotypes, samples and reference context, compute
* genotype likelihoods and assemble into a list of variant contexts and genomic events ready for calling
*
* The list of samples we're working with is obtained from the haplotypeReadMap
*
* @param UG_engine UG Engine with basic input parameters
* @param haplotypes Haplotypes to assign likelihoods to
* @param samples Samples to genotype
* @param haplotypeReadMap Map from reads->(haplotypes,likelihoods)
* @param perSampleFilteredReadList
* @param ref Reference bytes at active region
@ -136,7 +141,6 @@ public class GenotypingEngine {
// TODO - can this be refactored? this is hard to follow!
public CalledHaplotypes assignGenotypeLikelihoods( final UnifiedGenotyperEngine UG_engine,
final List<Haplotype> haplotypes,
final List<String> samples,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
final byte[] ref,
@ -145,99 +149,25 @@ public class GenotypingEngine {
final GenomeLocParser genomeLocParser,
final List<VariantContext> activeAllelesToGenotype ) {
// sanity check input arguments
if (UG_engine == null)
throw new IllegalArgumentException("UG_Engine input can't be null, got "+UG_engine);
if (haplotypes == null || haplotypes.isEmpty())
throw new IllegalArgumentException("haplotypes input should be non-empty and non-null, got "+haplotypes);
if (samples == null || samples.isEmpty())
throw new IllegalArgumentException("samples input must be non-empty and non-null, got "+samples);
if (haplotypeReadMap == null || haplotypeReadMap.isEmpty())
throw new IllegalArgumentException("haplotypeReadMap input should be non-empty and non-null, got "+haplotypeReadMap);
if (ref == null || ref.length == 0 )
throw new IllegalArgumentException("ref bytes input should be non-empty and non-null, got "+ref);
if (refLoc == null || refLoc.getStop()-refLoc.getStart()+1 != ref.length)
throw new IllegalArgumentException(" refLoc must be non-null and length must match ref bytes, got "+refLoc);
if (activeRegionWindow == null )
throw new IllegalArgumentException("activeRegionWindow must be non-null, got "+activeRegionWindow);
if (activeAllelesToGenotype == null )
throw new IllegalArgumentException("activeAllelesToGenotype must be non-null, got "+activeAllelesToGenotype);
if (genomeLocParser == null )
throw new IllegalArgumentException("genomeLocParser must be non-null, got "+genomeLocParser);
if (UG_engine == null) throw new IllegalArgumentException("UG_Engine input can't be null, got "+UG_engine);
if (haplotypes == null || haplotypes.isEmpty()) throw new IllegalArgumentException("haplotypes input should be non-empty and non-null, got "+haplotypes);
if (haplotypeReadMap == null || haplotypeReadMap.isEmpty()) throw new IllegalArgumentException("haplotypeReadMap input should be non-empty and non-null, got "+haplotypeReadMap);
if (ref == null || ref.length == 0 ) throw new IllegalArgumentException("ref bytes input should be non-empty and non-null, got "+ref);
if (refLoc == null || refLoc.size() != ref.length) throw new IllegalArgumentException(" refLoc must be non-null and length must match ref bytes, got "+refLoc);
if (activeRegionWindow == null ) throw new IllegalArgumentException("activeRegionWindow must be non-null, got "+activeRegionWindow);
if (activeAllelesToGenotype == null ) throw new IllegalArgumentException("activeAllelesToGenotype must be non-null, got "+activeAllelesToGenotype);
if (genomeLocParser == null ) throw new IllegalArgumentException("genomeLocParser must be non-null, got "+genomeLocParser);
final List<VariantContext> returnCalls = new ArrayList<VariantContext>();
final boolean in_GGA_mode = !activeAllelesToGenotype.isEmpty();
// Using the cigar from each called haplotype figure out what events need to be written out in a VCF file
final TreeSet<Integer> startPosKeySet = new TreeSet<Integer>();
int count = 0;
if( DEBUG ) { logger.info("=== Best Haplotypes ==="); }
for( final Haplotype h : haplotypes ) {
// Walk along the alignment and turn any difference from the reference into an event
h.setEventMap( generateVCsFromAlignment( h, h.getAlignmentStartHapwrtRef(), h.getCigar(), ref, h.getBases(), refLoc, "HC" + count++ ) );
if( !in_GGA_mode ) { startPosKeySet.addAll(h.getEventMap().keySet()); }
if( DEBUG ) {
logger.info(h.toString());
logger.info("> Cigar = " + h.getCigar());
logger.info(">> Events = " + h.getEventMap());
}
}
cleanUpSymbolicUnassembledEvents( haplotypes );
if( !in_GGA_mode && samples.size() >= 10 ) { // if not in GGA mode and have at least 10 samples try to create MNP and complex events by looking at LD structure
mergeConsecutiveEventsBasedOnLD( haplotypes, samples, haplotypeReadMap, startPosKeySet, ref, refLoc );
cleanUpSymbolicUnassembledEvents( haplotypes ); // the newly created merged events could be overlapping the unassembled events
}
if( in_GGA_mode ) {
for( final VariantContext compVC : activeAllelesToGenotype ) {
startPosKeySet.add( compVC.getStart() );
}
}
final Set<Haplotype> calledHaplotypes = new HashSet<Haplotype>();
// update the haplotypes so we're ready to call, getting the ordered list of positions on the reference
// that carry events among the haplotypes
final TreeSet<Integer> startPosKeySet = decomposeHaplotypesIntoVariantContexts(haplotypes, haplotypeReadMap, ref, refLoc, activeAllelesToGenotype);
// Walk along each position in the key set and create each event to be outputted
final Set<Haplotype> calledHaplotypes = new HashSet<Haplotype>();
final List<VariantContext> returnCalls = new ArrayList<VariantContext>();
for( final int loc : startPosKeySet ) {
if( loc >= activeRegionWindow.getStart() && loc <= activeRegionWindow.getStop() ) { // genotyping an event inside this active region
final List<VariantContext> eventsAtThisLoc = new ArrayList<VariantContext>(); // the overlapping events to merge into a common reference view
final List<String> priorityList = new ArrayList<String>(); // used to merge overlapping events into common reference view
if( !in_GGA_mode ) {
for( final Haplotype h : haplotypes ) {
final Map<Integer,VariantContext> eventMap = h.getEventMap();
final VariantContext vc = eventMap.get(loc);
if( vc != null && !containsVCWithMatchingAlleles(eventsAtThisLoc, vc) ) {
eventsAtThisLoc.add(vc);
priorityList.add(vc.getSource());
}
}
} else { // we are in GGA mode!
int compCount = 0;
for( final VariantContext compVC : activeAllelesToGenotype ) {
if( compVC.getStart() == loc ) {
int alleleCount = 0;
for( final Allele compAltAllele : compVC.getAlternateAlleles() ) {
List<Allele> alleleSet = new ArrayList<Allele>(2);
alleleSet.add(compVC.getReference());
alleleSet.add(compAltAllele);
final String vcSourceName = "Comp" + compCount + "Allele" + alleleCount;
// check if this event is already in the list of events due to a repeat in the input alleles track
final VariantContext candidateEventToAdd = new VariantContextBuilder(compVC).alleles(alleleSet).source(vcSourceName).make();
boolean alreadyExists = false;
for( final VariantContext eventToTest : eventsAtThisLoc ) {
if( eventToTest.hasSameAllelesAs(candidateEventToAdd) ) {
alreadyExists = true;
}
}
if( !alreadyExists ) {
priorityList.add(vcSourceName);
eventsAtThisLoc.add(candidateEventToAdd);
}
alleleCount++;
}
}
compCount++;
}
}
final List<VariantContext> eventsAtThisLoc = getVCsAtThisLocation(haplotypes, loc, activeAllelesToGenotype);
if( eventsAtThisLoc.isEmpty() ) { continue; }
@ -245,7 +175,7 @@ public class GenotypingEngine {
final Map<Event, List<Haplotype>> eventMapper = createEventMapper(loc, eventsAtThisLoc, haplotypes);
// Sanity check the priority list for mistakes
validatePriorityList( priorityList, eventsAtThisLoc );
final List<String> priorityList = makePriorityList(eventsAtThisLoc);
// Merge the event to find a common reference representation
final VariantContext mergedVC = GATKVariantContextUtils.simpleMerge(eventsAtThisLoc, priorityList, GATKVariantContextUtils.FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED, GATKVariantContextUtils.GenotypeMergeType.PRIORITIZE, false, false, null, false, false);
@ -264,12 +194,11 @@ public class GenotypingEngine {
if( DEBUG ) {
logger.info("Genotyping event at " + loc + " with alleles = " + mergedVC.getAlleles());
//System.out.println("Event/haplotype allele mapping = " + alleleMapper);
}
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap = convertHaplotypeReadMapToAlleleReadMap( haplotypeReadMap, alleleMapper, UG_engine.getUAC().CONTAMINATION_FRACTION, UG_engine.getUAC().contaminationLog );
final GenotypesContext genotypes = calculateGLsForThisEvent( samples, alleleReadMap, mergedVC );
final GenotypesContext genotypes = calculateGLsForThisEvent( alleleReadMap, mergedVC );
final VariantContext call = UG_engine.calculateGenotypes(new VariantContextBuilder(mergedVC).genotypes(genotypes).make(), mergedVC.isSNP() ? GenotypeLikelihoodsCalculationModel.Model.SNP : GenotypeLikelihoodsCalculationModel.Model.INDEL);
if( call != null ) {
final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap_annotations = ( USE_FILTERED_READ_MAP_FOR_ANNOTATIONS ? alleleReadMap :
@ -277,7 +206,6 @@ public class GenotypingEngine {
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = filterToOnlyOverlappingReads( genomeLocParser, alleleReadMap_annotations, perSampleFilteredReadList, call );
VariantContext annotatedCall = call;
// TODO -- should be before annotated call, so that QDL works correctly
if( annotatedCall.getAlleles().size() != mergedVC.getAlleles().size() ) { // some alleles were removed so reverseTrimming might be necessary!
annotatedCall = GATKVariantContextUtils.reverseTrimAlleles(annotatedCall);
}
@ -295,50 +223,127 @@ public class GenotypingEngine {
return new CalledHaplotypes(returnCalls, calledHaplotypes);
}
/**
* Go through the haplotypes we assembled, and decompose them into their constituent variant contexts
*
* Event maps are built onto the haplotypes via EventMap.buildEventMapsForHaplotypes, symbolic
* unassembled events are cleaned up, and (outside of GGA mode) the cross-haplotype event merger
* is given a chance to combine events before the final position set is returned.
*
* @param haplotypes the list of haplotypes we're working with
* @param haplotypeReadMap map from samples -> the per read allele likelihoods
* @param ref the reference bases (over the same interval as the haplotypes)
* @param refLoc the span of the reference bases
* @param activeAllelesToGenotype alleles we want to ensure are scheduled for genotyping (GGA mode)
* @return the sorted set of reference start positions of the events to genotype; in GGA mode this
*         contains exactly the start positions of activeAllelesToGenotype
*/
private TreeSet<Integer> decomposeHaplotypesIntoVariantContexts(final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final byte[] ref,
final GenomeLoc refLoc,
final List<VariantContext> activeAllelesToGenotype) {
// GGA (genotype-given-alleles) mode is active iff specific alleles were requested
final boolean in_GGA_mode = !activeAllelesToGenotype.isEmpty();
// Using the cigar from each called haplotype figure out what events need to be written out in a VCF file
final TreeSet<Integer> startPosKeySet = EventMap.buildEventMapsForHaplotypes(haplotypes, ref, refLoc, DEBUG);
// in GGA mode only the explicitly requested positions are genotyped, so discard the discovered ones
if ( in_GGA_mode ) startPosKeySet.clear();
cleanUpSymbolicUnassembledEvents( haplotypes );
if ( !in_GGA_mode ) {
// run the event merger if we're not in GGA mode
final boolean mergedAnything = crossHaplotypeEventMerger.merge(haplotypes, haplotypeReadMap, startPosKeySet, ref, refLoc);
if ( mergedAnything )
cleanUpSymbolicUnassembledEvents( haplotypes ); // the newly created merged events could be overlapping the unassembled events
}
if ( in_GGA_mode ) {
// schedule every requested allele's start position for genotyping
for( final VariantContext compVC : activeAllelesToGenotype ) {
startPosKeySet.add( compVC.getStart() );
}
}
return startPosKeySet;
}
/**
* Build the priority list used to merge overlapping events into a common reference view.
*
* The priority of each variant context is simply its source name, kept in input order.
*
* @param vcs a list of variant contexts
* @return the list of the sources of vcs in the same order
*/
private List<String> makePriorityList(final List<VariantContext> vcs) {
final List<String> sources = new ArrayList<String>(vcs.size());
for ( int i = 0; i < vcs.size(); i++ ) {
sources.add(vcs.get(i).getSource());
}
return sources;
}
/**
* Collect the variant contexts starting at position loc across the given haplotypes.
*
* In normal (discovery) mode this walks each haplotype's event map and gathers the distinct
* events (by allele content) at loc.  In GGA mode (activeAllelesToGenotype non-empty) the
* haplotype event maps are ignored and one biallelic VariantContext is synthesized per
* requested alternate allele at loc, with a unique "Comp<i>Allele<j>" source name.
*
* @param haplotypes the haplotypes whose event maps are consulted (discovery mode only)
* @param loc the reference start position of interest
* @param activeAllelesToGenotype the GGA-mode allele list; empty means discovery mode
* @return the (possibly empty) list of distinct events starting at loc
*/
private List<VariantContext> getVCsAtThisLocation(final List<Haplotype> haplotypes,
final int loc,
final List<VariantContext> activeAllelesToGenotype) {
// the overlapping events to merge into a common reference view
final List<VariantContext> eventsAtThisLoc = new ArrayList<VariantContext>();
if( activeAllelesToGenotype.isEmpty() ) {
for( final Haplotype h : haplotypes ) {
final EventMap eventMap = h.getEventMap();
final VariantContext vc = eventMap.get(loc);
// keep only events with allele sets we haven't already collected
if( vc != null && !containsVCWithMatchingAlleles(eventsAtThisLoc, vc) ) {
eventsAtThisLoc.add(vc);
}
}
} else { // we are in GGA mode!
int compCount = 0;
for( final VariantContext compVC : activeAllelesToGenotype ) {
if( compVC.getStart() == loc ) {
int alleleCount = 0;
for( final Allele compAltAllele : compVC.getAlternateAlleles() ) {
// build a biallelic (ref + single alt) version of the requested event
List<Allele> alleleSet = new ArrayList<Allele>(2);
alleleSet.add(compVC.getReference());
alleleSet.add(compAltAllele);
final String vcSourceName = "Comp" + compCount + "Allele" + alleleCount;
// check if this event is already in the list of events due to a repeat in the input alleles track
final VariantContext candidateEventToAdd = new VariantContextBuilder(compVC).alleles(alleleSet).source(vcSourceName).make();
boolean alreadyExists = false;
for( final VariantContext eventToTest : eventsAtThisLoc ) {
if( eventToTest.hasSameAllelesAs(candidateEventToAdd) ) {
alreadyExists = true;
}
}
if( !alreadyExists ) {
eventsAtThisLoc.add(candidateEventToAdd);
}
alleleCount++;
}
}
compCount++;
}
}
return eventsAtThisLoc;
}
/**
* For a particular event described in inputVC, form PL vector for each sample by looking into allele read map and filling likelihood matrix for each allele
* @param samples List of samples to genotype
* @param alleleReadMap Allele map describing mapping from reads to alleles and corresponding likelihoods
* @param mergedVC Input VC with event to genotype
* @return GenotypesContext object wrapping genotype objects with PLs
*/
@Requires({"samples != null","alleleReadMap!= null", "mergedVC != null"})
@Requires({"alleleReadMap!= null", "mergedVC != null"})
@Ensures("result != null")
private GenotypesContext calculateGLsForThisEvent( final List<String> samples, final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap, final VariantContext mergedVC ) {
final GenotypesContext genotypes = GenotypesContext.create(samples.size());
private GenotypesContext calculateGLsForThisEvent( final Map<String, PerReadAlleleLikelihoodMap> alleleReadMap, final VariantContext mergedVC ) {
final GenotypesContext genotypes = GenotypesContext.create(alleleReadMap.size());
// Grab the genotype likelihoods from the appropriate places in the haplotype likelihood matrix -- calculation performed independently per sample
for( final String sample : samples ) {
for( final String sample : alleleReadMap.keySet() ) {
final int numHaplotypes = mergedVC.getAlleles().size();
final double[] genotypeLikelihoods = new double[numHaplotypes * (numHaplotypes+1) / 2];
final double[][] haplotypeLikelihoodMatrix = LikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, alleleReadMap, mergedVC.getAlleles());
final double[][] haplotypeLikelihoodMatrix = LikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(sample, alleleReadMap, mergedVC.getAlleles(), true);
int glIndex = 0;
for( int iii = 0; iii < numHaplotypes; iii++ ) {
for( int jjj = 0; jjj <= iii; jjj++ ) {
genotypeLikelihoods[glIndex++] = haplotypeLikelihoodMatrix[iii][jjj]; // for example: AA,AB,BB,AC,BC,CC
}
}
genotypes.add( new GenotypeBuilder(sample).alleles(noCall).PL(genotypeLikelihoods).make() );
genotypes.add(new GenotypeBuilder(sample).alleles(noCall).PL(genotypeLikelihoods).make());
}
return genotypes;
}
/**
* Sanity check that the priority list and the events at this location are mutually consistent:
* every event's source appears in the priority list, and every priority entry matches some event.
*
* @param priorityList the source names used to prioritize the merge
* @param eventsAtThisLoc the variant contexts being merged
*/
private void validatePriorityList( final List<String> priorityList, final List<VariantContext> eventsAtThisLoc ) {
// direction 1: each event's source must have been added to the priority list
for( final VariantContext vc : eventsAtThisLoc ) {
if( !priorityList.contains(vc.getSource()) ) {
throw new ReviewedStingException("Event found on haplotype that wasn't added to priority list. Something went wrong in the merging of alleles.");
}
}
// direction 2: each priority entry must correspond to an actual event
for( final String name : priorityList ) {
VariantContext match = null;
for( final VariantContext vc : eventsAtThisLoc ) {
if( vc.getSource().equals(name) ) {
match = vc;
break;
}
}
if( match == null ) {
throw new ReviewedStingException("Event added to priority list but wasn't found on any haplotype. Something went wrong in the merging of alleles.");
}
}
}
private static Map<String, PerReadAlleleLikelihoodMap> filterToOnlyOverlappingReads( final GenomeLocParser parser,
final Map<String, PerReadAlleleLikelihoodMap> perSampleReadMap,
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList,
@ -382,10 +387,10 @@ public class GenotypingEngine {
protected static void cleanUpSymbolicUnassembledEvents( final List<Haplotype> haplotypes ) {
final List<Haplotype> haplotypesToRemove = new ArrayList<Haplotype>();
for( final Haplotype h : haplotypes ) {
for( final VariantContext vc : h.getEventMap().values() ) {
for( final VariantContext vc : h.getEventMap().getVariantContexts() ) {
if( vc.isSymbolic() ) {
for( final Haplotype h2 : haplotypes ) {
for( final VariantContext vc2 : h2.getEventMap().values() ) {
for( final VariantContext vc2 : h2.getEventMap().getVariantContexts() ) {
if( vc.getStart() == vc2.getStart() && (vc2.isIndel() || vc2.isMNP()) ) { // unfortunately symbolic alleles can't currently be combined with non-point events
haplotypesToRemove.add(h);
break;
@ -426,158 +431,6 @@ public class GenotypingEngine {
return alleleReadMap;
}
/**
* TODO - comment me, clean me, refactor me!
* @param haplotypes
* @param samples
* @param haplotypeReadMap
* @param startPosKeySet
* @param ref
* @param refLoc
*/
protected void mergeConsecutiveEventsBasedOnLD( final List<Haplotype> haplotypes,
final List<String> samples,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final TreeSet<Integer> startPosKeySet,
final byte[] ref,
final GenomeLoc refLoc ) {
// Only consider merging event pairs whose start positions are within this many bases of each other.
final int MAX_SIZE_TO_COMBINE = 15;
// Minimum R^2 linkage-disequilibrium value required before two events are combined into one.
final double MERGE_EVENTS_R2_THRESHOLD = 0.95;
if( startPosKeySet.size() <= 1 ) { return; }
// Keep sweeping the start positions until a full pass makes no further merges,
// since each merge can create a new event that is itself mergeable with a neighbor.
boolean mapWasUpdated = true;
while( mapWasUpdated ) {
mapWasUpdated = false;
// loop over the set of start locations and consider pairs that start near each other
final Iterator<Integer> iter = startPosKeySet.iterator();
int thisStart = iter.next();
while( iter.hasNext() ) {
final int nextStart = iter.next();
if( nextStart - thisStart < MAX_SIZE_TO_COMBINE) {
boolean isBiallelic = true;
VariantContext thisVC = null;
VariantContext nextVC = null;
// 2x2 contingency table of log10-summed haplotype likelihoods used for the R^2 calculation:
// x11 = neither event on the haplotype, x12 = only the next event,
// x21 = only this event, x22 = both events present.
double x11 = Double.NEGATIVE_INFINITY;
double x12 = Double.NEGATIVE_INFINITY;
double x21 = Double.NEGATIVE_INFINITY;
double x22 = Double.NEGATIVE_INFINITY;
for( final Haplotype h : haplotypes ) {
// only make complex substitutions out of consecutive biallelic sites
final VariantContext thisHapVC = h.getEventMap().get(thisStart);
if( thisHapVC != null && !thisHapVC.isSymbolic() ) { // something was found at this location on this haplotype
if( thisVC == null ) {
thisVC = thisHapVC;
} else if( !thisHapVC.hasSameAllelesAs( thisVC ) ) {
// a second, different allele at the same start => multiallelic, so give up on this pair
isBiallelic = false;
break;
}
}
final VariantContext nextHapVC = h.getEventMap().get(nextStart);
if( nextHapVC != null && !nextHapVC.isSymbolic() ) { // something was found at the next location on this haplotype
if( nextVC == null ) {
nextVC = nextHapVC;
} else if( !nextHapVC.hasSameAllelesAs( nextVC ) ) {
isBiallelic = false;
break;
}
}
// count up the co-occurrences of the events for the R^2 calculation
for( final String sample : samples ) {
final double haplotypeLikelihood = LikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods( Collections.singleton(sample), haplotypeReadMap, Collections.singletonList(Allele.create(h, true)) )[0][0];
if( thisHapVC == null ) {
if( nextHapVC == null ) { x11 = MathUtils.approximateLog10SumLog10(x11, haplotypeLikelihood); }
else { x12 = MathUtils.approximateLog10SumLog10(x12, haplotypeLikelihood); }
} else {
if( nextHapVC == null ) { x21 = MathUtils.approximateLog10SumLog10(x21, haplotypeLikelihood); }
else { x22 = MathUtils.approximateLog10SumLog10(x22, haplotypeLikelihood); }
}
}
}
// at least one haplotype must carry each event or there is nothing to merge
if( thisVC == null || nextVC == null ) {
continue;
}
if( isBiallelic ) {
// convert the log10 table back to linear space for the R^2 statistic
final double R2 = calculateR2LD( Math.pow(10.0, x11), Math.pow(10.0, x12), Math.pow(10.0, x21), Math.pow(10.0, x22) );
if( DEBUG ) {
logger.info("Found consecutive biallelic events with R^2 = " + String.format("%.4f", R2));
logger.info("-- " + thisVC);
logger.info("-- " + nextVC);
}
if( R2 > MERGE_EVENTS_R2_THRESHOLD ) {
final VariantContext mergedVC = createMergedVariantContext(thisVC, nextVC, ref, refLoc);
// remove the old event from the eventMap on every haplotype and the start pos key set, replace with merged event
for( final Haplotype h : haplotypes ) {
final Map<Integer, VariantContext> eventMap = h.getEventMap();
// only replace on haplotypes carrying BOTH events; others keep their single event
if( eventMap.containsKey(thisStart) && eventMap.containsKey(nextStart) ) {
eventMap.remove(thisStart);
eventMap.remove(nextStart);
eventMap.put(mergedVC.getStart(), mergedVC);
}
}
startPosKeySet.add(mergedVC.getStart());
// drop the old start positions from the key set only if no haplotype still uses them
boolean containsStart = false;
boolean containsNext = false;
for( final Haplotype h : haplotypes ) {
final Map<Integer, VariantContext> eventMap = h.getEventMap();
if( eventMap.containsKey(thisStart) ) { containsStart = true; }
if( eventMap.containsKey(nextStart) ) { containsNext = true; }
}
if(!containsStart) { startPosKeySet.remove(thisStart); }
if(!containsNext) { startPosKeySet.remove(nextStart); }
if( DEBUG ) { logger.info("====> " + mergedVC); }
mapWasUpdated = true;
break; // break out of tree set iteration since it was just updated, start over from the beginning and keep merging events
}
}
}
thisStart = nextStart;
}
}
}
// BUGBUG: make this merge function more general
/**
 * Combine two consecutive biallelic events (thisVC occurring before nextVC on the reference)
 * into a single merged VariantContext spanning both, filling the gap between them with
 * reference bases on both the ref and alt alleles.
 *
 * Assumes each input VC is biallelic (only getAlternateAllele(0) is consulted).
 *
 * @param thisVC the earlier event
 * @param nextVC the later event
 * @param ref reference bases covering refLoc
 * @param refLoc the genomic span corresponding to ref (ref[0] is at refLoc.getStart())
 * @return a new biallelic VariantContext with source "merged" covering both input events
 */
protected static VariantContext createMergedVariantContext( final VariantContext thisVC, final VariantContext nextVC, final byte[] ref, final GenomeLoc refLoc ) {
final int thisStart = thisVC.getStart();
final int nextStart = nextVC.getStart();
byte[] refBases = new byte[]{};
byte[] altBases = new byte[]{};
// seed the merged alleles with the first event's ref and alt bases
refBases = ArrayUtils.addAll(refBases, thisVC.getReference().getBases());
altBases = ArrayUtils.addAll(altBases, thisVC.getAlternateAllele(0).getBases());
int locus;
// append the reference bases lying between the two events to BOTH alleles
for( locus = thisStart + refBases.length; locus < nextStart; locus++ ) {
final byte refByte = ref[locus - refLoc.getStart()];
refBases = ArrayUtils.add(refBases, refByte);
altBases = ArrayUtils.add(altBases, refByte);
}
// if the first event already consumed past nextStart (locus > nextStart), skip the second
// event's leading padding base to avoid duplicating it
refBases = ArrayUtils.addAll(refBases, ArrayUtils.subarray(nextVC.getReference().getBases(), locus > nextStart ? 1 : 0, nextVC.getReference().getBases().length)); // special case of deletion including the padding base of consecutive indel
altBases = ArrayUtils.addAll(altBases, nextVC.getAlternateAllele(0).getBases());
int iii = 0;
if( refBases.length == altBases.length ) { // insertion + deletion of same length creates an MNP --> trim common prefix bases off the beginning of the allele
while( iii < refBases.length && refBases[iii] == altBases[iii] ) { iii++; }
}
// note: the start position is shifted by the number of trimmed prefix bases (iii)
final List<Allele> mergedAlleles = new ArrayList<Allele>();
mergedAlleles.add( Allele.create( ArrayUtils.subarray(refBases, iii, refBases.length), true ) );
mergedAlleles.add( Allele.create( ArrayUtils.subarray(altBases, iii, altBases.length), false ) );
return new VariantContextBuilder("merged", thisVC.getChr(), thisVC.getStart() + iii, nextVC.getEnd(), mergedAlleles).make();
}
/**
 * Compute the R^2 linkage-disequilibrium statistic from a 2x2 table of co-occurrence weights.
 *
 * The inputs are (linear-space) weights of haplotypes carrying: neither event (x11),
 * only the second event (x12), only the first event (x21), or both events (x22).
 *
 * @param x11 weight of haplotypes with neither event
 * @param x12 weight of haplotypes with only the second event
 * @param x21 weight of haplotypes with only the first event
 * @param x22 weight of haplotypes with both events
 * @return R^2 in [0, 1]; NaN when a marginal frequency is 0 or 1 (degenerate table)
 */
protected static double calculateR2LD( final double x11, final double x12, final double x21, final double x22 ) {
    // normalize the table entries into joint probabilities
    final double total = x11 + x12 + x21 + x22;
    final double jointA1B1 = x11 / total;
    final double jointA1B2 = x12 / total;
    final double jointA2B1 = x21 / total;

    // marginal frequencies of state 1 at each of the two sites
    final double margA1 = jointA1B1 + jointA1B2;
    final double margB1 = jointA1B1 + jointA2B1;

    // D = deviation of the joint frequency from independence; R^2 = D^2 / (pA qA pB qB)
    final double d = jointA1B1 - margA1 * margB1;
    return (d * d) / ( margA1 * (1.0 - margA1) * margB1 * (1.0 - margB1) );
}
protected static Map<Allele, List<Haplotype>> createAlleleMapper( final Map<VariantContext, Allele> mergeMap, final Map<Event, List<Haplotype>> eventMap ) {
final Map<Allele, List<Haplotype>> alleleMapper = new LinkedHashMap<Allele, List<Haplotype>>();
for( final Map.Entry<VariantContext, Allele> entry : mergeMap.entrySet() ) {
@ -604,8 +457,8 @@ public class GenotypingEngine {
alleles.add(h.getArtificialRefAllele());
alleles.add(h.getArtificialAltAllele());
final Event artificialVC = new Event( (new VariantContextBuilder()).source("artificialHaplotype")
.alleles(alleles)
.loc(refVC.getChr(), refVC.getStart(), refVC.getStart() + h.getArtificialRefAllele().length() - 1).make() );
.alleles(alleles)
.loc(refVC.getChr(), refVC.getStart(), refVC.getStart() + h.getArtificialRefAllele().length() - 1).make() );
if( eventMapper.containsKey(artificialVC) ) {
eventMapper.get(artificialVC).add(h);
}
@ -697,6 +550,11 @@ public class GenotypingEngine {
return eventAllelesForSample;
}
/**
 * Build the map of event start position to VariantContext for a haplotype's alignment
 * against the reference.
 *
 * @deprecated callers should construct an {@link EventMap} directly; this thin wrapper
 *             remains only for legacy call sites.
 */
@Deprecated
protected static Map<Integer,VariantContext> generateVCsFromAlignment( final Haplotype haplotype, final byte[] ref, final GenomeLoc refLoc, final String sourceNameToAdd ) {
// EventMap satisfies Map<Integer,VariantContext> (keyed by event start), so return it directly
return new EventMap(haplotype, ref, refLoc, sourceNameToAdd);
}
protected static boolean containsVCWithMatchingAlleles( final List<VariantContext> list, final VariantContext vcToTest ) {
for( final VariantContext vc : list ) {
if( vc.hasSameAllelesAs(vcToTest) ) {
@ -706,94 +564,6 @@ public class GenotypingEngine {
return false;
}
/**
 * Walk the CIGAR of a haplotype-vs-reference alignment and emit a VariantContext for each
 * discovered difference (SNPs, insertions, deletions), keyed by reference start position.
 *
 * Indels are represented with a single padding base taken from the reference immediately
 * before the event. Insertions at the very first or very last CIGAR element are emitted as
 * the symbolic unassembled-event allele since their bases are not fully resolved.
 *
 * @param haplotype the haplotype being aligned (NOTE(review): not read by this method body;
 *                  kept for signature compatibility — confirm before removing)
 * @param alignmentStartHapwrtRef offset of the alignment start within ref; negative means a
 *                                failed Smith-Waterman alignment
 * @param cigar the alignment CIGAR of the haplotype against ref
 * @param ref the reference bases; ref[0] corresponds to refLoc.getStart()
 * @param alignment the aligned haplotype bases consumed by I/M/EQ/X/S elements
 * @param refLoc the genomic span of ref
 * @param sourceNameToAdd source string stamped on every produced VariantContext
 * @return map from event start position to VariantContext, or null if the alignment start is invalid
 * @throws ReviewedStingException on CIGAR operators that Smith-Waterman should never produce (N/H/P)
 */
protected static Map<Integer,VariantContext> generateVCsFromAlignment( final Haplotype haplotype, final int alignmentStartHapwrtRef, final Cigar cigar, final byte[] ref, final byte[] alignment, final GenomeLoc refLoc, final String sourceNameToAdd ) {
final Map<Integer,VariantContext> vcs = new LinkedHashMap<Integer,VariantContext>();
int refPos = alignmentStartHapwrtRef;
if( refPos < 0 ) { return null; } // Protection against SW failures
int alignmentPos = 0;
for( int cigarIndex = 0; cigarIndex < cigar.numCigarElements(); cigarIndex++ ) {
final CigarElement ce = cigar.getCigarElement(cigarIndex);
final int elementLength = ce.getLength();
switch( ce.getOperator() ) {
case I:
{
if( refPos > 0 ) { // protect against trying to create insertions/deletions at the beginning of a contig
final List<Allele> insertionAlleles = new ArrayList<Allele>();
// the event is anchored at the reference base immediately before the inserted bases
final int insertionStart = refLoc.getStart() + refPos - 1;
final byte refByte = ref[refPos-1];
// only a regular (ACGT) padding base can serve as the ref allele
if( BaseUtils.isRegularBase(refByte) ) {
insertionAlleles.add( Allele.create(refByte, true) );
}
if( cigarIndex == 0 || cigarIndex == cigar.getCigarElements().size() - 1 ) { // if the insertion isn't completely resolved in the haplotype then make it a symbolic allele
insertionAlleles.add( SYMBOLIC_UNASSEMBLED_EVENT_ALLELE );
} else {
byte[] insertionBases = new byte[]{};
insertionBases = ArrayUtils.add(insertionBases, ref[refPos-1]); // add the padding base
insertionBases = ArrayUtils.addAll(insertionBases, Arrays.copyOfRange( alignment, alignmentPos, alignmentPos + elementLength ));
if( BaseUtils.isAllRegularBases(insertionBases) ) {
insertionAlleles.add( Allele.create(insertionBases, false) );
}
}
if( insertionAlleles.size() == 2 ) { // found a proper ref and alt allele
vcs.put(insertionStart, new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), insertionStart, insertionStart, insertionAlleles).make());
}
}
// insertions consume haplotype bases but no reference bases
alignmentPos += elementLength;
break;
}
case S:
{
// soft clips consume haplotype bases only; no event is produced
alignmentPos += elementLength;
break;
}
case D:
{
if( refPos > 0 ) { // protect against trying to create insertions/deletions at the beginning of a contig
final byte[] deletionBases = Arrays.copyOfRange( ref, refPos - 1, refPos + elementLength ); // add padding base
final List<Allele> deletionAlleles = new ArrayList<Allele>();
final int deletionStart = refLoc.getStart() + refPos - 1;
final byte refByte = ref[refPos-1];
// skip the event entirely if any involved base is non-regular (e.g. N)
if( BaseUtils.isRegularBase(refByte) && BaseUtils.isAllRegularBases(deletionBases) ) {
deletionAlleles.add( Allele.create(deletionBases, true) );
deletionAlleles.add( Allele.create(refByte, false) );
vcs.put(deletionStart, new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), deletionStart, deletionStart + elementLength, deletionAlleles).make());
}
}
// deletions consume reference bases but no haplotype bases
refPos += elementLength;
break;
}
case M:
case EQ:
case X:
{
// compare base-by-base to find SNPs; M may contain both matches and mismatches
for( int iii = 0; iii < elementLength; iii++ ) {
final byte refByte = ref[refPos];
final byte altByte = alignment[alignmentPos];
if( refByte != altByte ) { // SNP!
if( BaseUtils.isRegularBase(refByte) && BaseUtils.isRegularBase(altByte) ) {
final List<Allele> snpAlleles = new ArrayList<Allele>();
snpAlleles.add( Allele.create( refByte, true ) );
snpAlleles.add( Allele.create( altByte, false ) );
vcs.put(refLoc.getStart() + refPos, new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), refLoc.getStart() + refPos, refLoc.getStart() + refPos, snpAlleles).make());
}
}
refPos++;
alignmentPos++;
}
break;
}
case N:
case H:
case P:
default:
throw new ReviewedStingException( "Unsupported cigar operator created during SW alignment: " + ce.getOperator() );
}
}
return vcs;
}
protected static class Event {
public VariantContext vc;

View File

@ -77,6 +77,7 @@ import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.fragments.FragmentCollection;
import org.broadinstitute.sting.utils.fragments.FragmentUtils;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.haplotype.*;
import org.broadinstitute.sting.utils.haplotypeBAMWriter.HaplotypeBAMWriter;
import org.broadinstitute.sting.utils.help.DocumentedGATKFeature;
import org.broadinstitute.sting.utils.help.HelpConstants;
@ -133,7 +134,7 @@ import java.util.*;
@DocumentedGATKFeature( groupName = HelpConstants.DOCS_CAT_VARDISC, extraDocs = {CommandLineGATK.class} )
@PartitionBy(PartitionType.LOCUS)
@BAQMode(ApplicationTime = ReadTransformer.ApplicationTime.FORBIDDEN)
@ActiveRegionTraversalParameters(extension=85, maxRegion=300)
@ActiveRegionTraversalParameters(extension=200, maxRegion=300)
@ReadFilters({HCMappingQualityFilter.class})
@Downsample(by= DownsampleType.BY_SAMPLE, toCoverage=250)
public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implements AnnotatorCompatible {
@ -196,7 +197,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Advanced
@Argument(fullName="minPruning", shortName="minPruning", doc = "The minimum allowed pruning factor in assembly graph. Paths with <= X supporting kmers are pruned from the graph", required = false)
protected int MIN_PRUNE_FACTOR = 1;
protected int MIN_PRUNE_FACTOR = 0;
@Advanced
@Argument(fullName="gcpHMM", shortName="gcpHMM", doc="Flat gap continuation penalty for use in the Pair HMM", required = false)
@ -280,6 +281,10 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Argument(fullName="excludeAnnotation", shortName="XA", doc="One or more specific annotations to exclude", required=false)
protected List<String> annotationsToExclude = new ArrayList<String>(Arrays.asList(new String[]{"SpanningDeletions", "TandemRepeatAnnotator"}));
@Advanced
// NOTE(review): the previous doc string was copy-pasted from useLowQualityBasesForAssembly
// ("include low quality bases when doing the assembly"); this flag actually disables the
// LD-based merging of consecutive variants (it selects MergeVariantsAcrossHaplotypes over
// LDMerger in initialize()).
@Argument(fullName="dontMergeVariantsViaLD", shortName="dontMergeVariantsViaLD", doc="If specified, we will not merge consecutive variants discovered on the same haplotype based on their linkage disequilibrium", required = false)
protected boolean dontMergeVariantsViaLD = false;
/**
* Which groups of annotations to add to the output VCF file. See the VariantAnnotator -list argument to view available groups.
*/
@ -297,10 +302,19 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
@Argument(fullName="debugGraphTransformations", shortName="debugGraphTransformations", doc="If specified, we will write DOT formatted graph files out of the assembler for only this graph size", required = false)
protected int debugGraphTransformations = -1;
@Hidden
@Hidden // TODO -- not currently useful
@Argument(fullName="useLowQualityBasesForAssembly", shortName="useLowQualityBasesForAssembly", doc="If specified, we will include low quality bases when doing the assembly", required = false)
protected boolean useLowQualityBasesForAssembly = false;
@Hidden
@Argument(fullName="dontTrimActiveRegions", shortName="donTrimActiveRegions", doc="If specified, we will not trim down the active region from the full region (active + extension) to just the active interval for genotyping", required = false)
protected boolean dontTrimActiveRegions = false;
@Hidden
@Argument(fullName="allowCyclesInKmerGraphToGeneratePaths", shortName="allowCyclesInKmerGraphToGeneratePaths", doc="If specified, we will allow cycles in the kmer graphs to generate paths with multiple copies of the path sequenece rather than just the shortest paths", required = false)
protected boolean allowCyclesInKmerGraphToGeneratePaths = false;
// the UG engines
private UnifiedGenotyperEngine UG_engine = null;
private UnifiedGenotyperEngine UG_engine_simple_genotyper = null;
@ -322,6 +336,13 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
// reference base padding size
private static final int REFERENCE_PADDING = 500;
// include at least this many bases around an event for calling it
private final static int PADDING_AROUND_SNPS_FOR_CALLING = 20;
private final static int PADDING_AROUND_OTHERS_FOR_CALLING = 150;
// the maximum extent into the full active region extension that we're willing to go in genotyping our events
private final static int MAX_GENOTYPING_ACTIVE_REGION_EXTENSION = 25;
private final static int maxReadsInRegionPerSample = 1000; // TODO -- should be an argument
private final static int minReadsPerAlignmentStart = 5; // TODO -- should be an argument
@ -404,14 +425,19 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
// setup the assembler
assemblyEngine = new DeBruijnAssembler( DEBUG, debugGraphTransformations, minKmer);
assemblyEngine = new DeBruijnAssembler(DEBUG, debugGraphTransformations, minKmer, allowCyclesInKmerGraphToGeneratePaths);
assemblyEngine.setErrorCorrectKmers(errorCorrectKmers);
assemblyEngine.setPruneFactor(MIN_PRUNE_FACTOR);
if ( graphWriter != null ) assemblyEngine.setGraphWriter(graphWriter);
if ( useLowQualityBasesForAssembly ) assemblyEngine.setMinBaseQualityToUseInAssembly((byte)1);
likelihoodCalculationEngine = new LikelihoodCalculationEngine( (byte)gcpHMM, DEBUG, pairHMM );
genotypingEngine = new GenotypingEngine( DEBUG, annotationEngine, USE_FILTERED_READ_MAP_FOR_ANNOTATIONS );
final MergeVariantsAcrossHaplotypes variantMerger = dontMergeVariantsViaLD
? new MergeVariantsAcrossHaplotypes()
: new LDMerger(DEBUG, 10, 1);
genotypingEngine = new GenotypingEngine( DEBUG, annotationEngine, USE_FILTERED_READ_MAP_FOR_ANNOTATIONS, variantMerger );
if ( bamWriter != null )
haplotypeBAMWriter = HaplotypeBAMWriter.create(bamWriterType, bamWriter, getToolkit().getSAMFileHeader());
@ -480,7 +506,7 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
final byte qual = p.getQual();
if( p.isDeletion() || qual > (byte) 18) {
int AA = 0; final int AB = 1; int BB = 2;
if( p.getBase() != ref.getBase() || p.isDeletion() || p.isBeforeDeletionStart() || p.isAfterDeletionEnd() || p.isBeforeInsertion() || p.isAfterInsertion() || p.isNextToSoftClip() ) {
if( p.getBase() != ref.getBase() || p.isDeletion() || p.isBeforeDeletionStart() || p.isAfterDeletionEnd() || p.isBeforeInsertion() || p.isAfterInsertion() || p.isNextToSoftClip() ) {
AA = 2;
BB = 0;
if( p.isNextToSoftClip() ) {
@ -511,59 +537,53 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
//---------------------------------------------------------------------------------------------------------------
@Override
public Integer map( final ActiveRegion activeRegion, final RefMetaDataTracker metaDataTracker ) {
public Integer map( final ActiveRegion originalActiveRegion, final RefMetaDataTracker metaDataTracker ) {
if ( justDetermineActiveRegions )
// we're benchmarking ART and/or the active region determination code in the HC, just leave without doing any work
return 1;
final List<VariantContext> activeAllelesToGenotype = new ArrayList<VariantContext>();
if( !originalActiveRegion.isActive() ) { return 0; } // Not active so nothing to do!
final List<VariantContext> activeAllelesToGenotype = new ArrayList<VariantContext>();
if( UG_engine.getUAC().GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) {
for( final VariantContext vc : allelesToGenotype ) {
if( activeRegion.getLocation().overlapsP( getToolkit().getGenomeLocParser().createGenomeLoc(vc) ) ) {
if( originalActiveRegion.getLocation().overlapsP( getToolkit().getGenomeLocParser().createGenomeLoc(vc) ) ) {
activeAllelesToGenotype.add(vc); // do something with these VCs during GGA mode
}
}
allelesToGenotype.removeAll( activeAllelesToGenotype );
// No alleles found in this region so nothing to do!
if ( activeAllelesToGenotype.isEmpty() ) { return 0; }
} else {
if( originalActiveRegion.size() == 0 ) { return 0; } // No reads here so nothing to do!
}
if( !activeRegion.isActive() ) { return 0; } // Not active so nothing to do!
if( activeRegion.size() == 0 && UG_engine.getUAC().GenotypingMode != GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES ) { return 0; } // No reads here so nothing to do!
if( UG_engine.getUAC().GenotypingMode == GenotypeLikelihoodsCalculationModel.GENOTYPING_MODE.GENOTYPE_GIVEN_ALLELES && activeAllelesToGenotype.isEmpty() ) { return 0; } // No alleles found in this region so nothing to do!
// run the local assembler, getting back a collection of information on how we should proceed
final AssemblyResult assemblyResult = assembleReads(originalActiveRegion, activeAllelesToGenotype);
finalizeActiveRegion(activeRegion); // merge overlapping fragments, clip adapter and low qual tails
final Haplotype referenceHaplotype = new Haplotype(activeRegion.getActiveRegionReference(referenceReader), true); // Create the reference haplotype which is the bases from the reference that make up the active region
final byte[] fullReferenceWithPadding = activeRegion.getActiveRegionReference(referenceReader, REFERENCE_PADDING);
final GenomeLoc paddedReferenceLoc = getPaddedLoc(activeRegion);
final List<Haplotype> haplotypes = assemblyEngine.runLocalAssembly( activeRegion, referenceHaplotype, fullReferenceWithPadding, paddedReferenceLoc, activeAllelesToGenotype );
if( haplotypes.size() == 1 ) { return 1; } // only the reference haplotype remains so nothing else to do!
final List<GATKSAMRecord> filteredReads = filterNonPassingReads( activeRegion ); // filter out reads from genotyping which fail mapping quality based criteria
if( activeRegion.size() == 0 ) { return 1; } // no reads remain after filtering so nothing else to do!
// sort haplotypes to take full advantage of haplotype start offset optimizations in PairHMM
Collections.sort( haplotypes, new Haplotype.HaplotypeBaseComparator() );
if (dontGenotype)
return 1;
// abort early if something is out of the acceptable range
if( assemblyResult.haplotypes.size() == 1 ) { return 1; } // only the reference haplotype remains so nothing else to do!
if( assemblyResult.regionForGenotyping.size() == 0 ) { return 1; } // no reads remain after filtering so nothing else to do!
if (dontGenotype) return 1; // user requested we not proceed
// evaluate each sample's reads against all haplotypes
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = likelihoodCalculationEngine.computeReadLikelihoods( haplotypes, splitReadsBySample( activeRegion.getReads() ) );
//logger.info("Computing read likelihoods with " + assemblyResult.regionForGenotyping.size() + " reads");
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = likelihoodCalculationEngine.computeReadLikelihoods( assemblyResult.haplotypes, splitReadsBySample( assemblyResult.regionForGenotyping.getReads() ) );
// filter out reads from genotyping which fail mapping quality based criteria
final List<GATKSAMRecord> filteredReads = filterNonPassingReads( assemblyResult.regionForGenotyping );
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList = splitReadsBySample( filteredReads );
// subset down to only the best haplotypes to be genotyped in all samples ( in GGA mode use all discovered haplotypes )
final List<Haplotype> bestHaplotypes = selectBestHaplotypesForGenotyping(haplotypes, stratifiedReadMap);
final List<Haplotype> bestHaplotypes = selectBestHaplotypesForGenotyping(assemblyResult.haplotypes, stratifiedReadMap);
final GenotypingEngine.CalledHaplotypes calledHaplotypes = genotypingEngine.assignGenotypeLikelihoods( UG_engine,
bestHaplotypes,
samplesList,
stratifiedReadMap,
perSampleFilteredReadList,
fullReferenceWithPadding,
paddedReferenceLoc,
activeRegion.getLocation(),
assemblyResult.fullReferenceWithPadding,
assemblyResult.paddedReferenceLoc,
assemblyResult.regionForGenotyping.getLocation(),
getToolkit().getGenomeLocParser(),
activeAllelesToGenotype );
@ -574,7 +594,10 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
}
if ( bamWriter != null ) {
haplotypeBAMWriter.writeReadsAlignedToHaplotypes(haplotypes, paddedReferenceLoc, bestHaplotypes, calledHaplotypes.getCalledHaplotypes(), stratifiedReadMap);
haplotypeBAMWriter.writeReadsAlignedToHaplotypes(assemblyResult.haplotypes, assemblyResult.paddedReferenceLoc,
bestHaplotypes,
calledHaplotypes.getCalledHaplotypes(),
stratifiedReadMap);
}
if( DEBUG ) { logger.info("----------------------------------------------------------------------------------"); }
@ -582,6 +605,152 @@ public class HaplotypeCaller extends ActiveRegionWalker<Integer, Integer> implem
return 1; // One active region was processed during this map call
}
/**
 * Immutable bundle of the local-assembly outputs that downstream genotyping consumes:
 * the assembled haplotypes, the (possibly trimmed) active region whose reads should be
 * genotyped, and the padded reference bases together with their genomic span.
 */
private final static class AssemblyResult {
// haplotypes produced by the assembler, possibly trimmed to the genotyping span
final List<Haplotype> haplotypes;
// the active region (original or trimmed) holding the reads to genotype
final ActiveRegion regionForGenotyping;
// reference bases over the padded location
final byte[] fullReferenceWithPadding;
// genomic span of fullReferenceWithPadding
final GenomeLoc paddedReferenceLoc;
private AssemblyResult(List<Haplotype> haplotypes, ActiveRegion regionForGenotyping, byte[] fullReferenceWithPadding, GenomeLoc paddedReferenceLoc) {
this.haplotypes = haplotypes;
this.regionForGenotyping = regionForGenotyping;
this.fullReferenceWithPadding = fullReferenceWithPadding;
this.paddedReferenceLoc = paddedReferenceLoc;
}
}
/**
* High-level function that runs the assembler on the active region reads,
* returning a data structure with the resulting information needed
* for further HC steps
*
* @param activeRegion the region we should assemble
* @param activeAllelesToGenotype additional alleles we might need to genotype (can be empty)
* @return the AssemblyResult describing how to proceed with genotyping
*/
protected AssemblyResult assembleReads(final ActiveRegion activeRegion, final List<VariantContext> activeAllelesToGenotype) {
// Prepare the region's reads: merge overlapping fragments, clip adapters and low-quality tails.
finalizeActiveRegion(activeRegion);

// The reference haplotype is just the reference bases spanning the active region.
final Haplotype refHaplotype = new Haplotype(activeRegion.getActiveRegionReference(referenceReader), true);
final byte[] paddedRefBases = activeRegion.getActiveRegionReference(referenceReader, REFERENCE_PADDING);
final GenomeLoc paddedRefLoc = getPaddedLoc(activeRegion);

// Run local reassembly of the region's reads against the reference haplotype.
final List<Haplotype> assembledHaplotypes =
        assemblyEngine.runLocalAssembly( activeRegion, refHaplotype, paddedRefBases, paddedRefLoc, activeAllelesToGenotype );

if ( dontTrimActiveRegions ) {
    // Trimming disabled (or unavailable): genotype over the original, untrimmed region.
    return new AssemblyResult(assembledHaplotypes, activeRegion, paddedRefBases, paddedRefLoc);
}
// Otherwise narrow the region (and its haplotypes/reads) to just what genotyping needs.
return trimActiveRegion(activeRegion, assembledHaplotypes, paddedRefBases, paddedRefLoc);
}
/**
* Trim down the active region to just enough to properly genotype the events among the haplotypes
*
* This function merely creates the region, but it doesn't populate the reads back into the region
*
* @param region our full active region
* @param haplotypes the list of haplotypes we've created from assembly
* @param ref the reference bases over the full padded location
* @param refLoc the span of the reference bases
* @return a new ActiveRegion trimmed down to just what's needed for genotyping, or null if we couldn't do this successfully
*/
private ActiveRegion createTrimmedRegion(final ActiveRegion region, final List<Haplotype> haplotypes, final byte[] ref, final GenomeLoc refLoc) {
// discover all variant events on the haplotypes so we know what span genotyping must cover
EventMap.buildEventMapsForHaplotypes(haplotypes, ref, refLoc, DEBUG);
final TreeSet<VariantContext> allContexts = EventMap.getAllVariantContexts(haplotypes);
final GenomeLocParser parser = getToolkit().getGenomeLocParser();
if ( allContexts.isEmpty() ) // no variants: return null so the caller falls back to the untrimmed region
return null;
final List<VariantContext> withinActiveRegion = new LinkedList<VariantContext>();
// start with the small SNP padding; upgrade to the larger padding if any non-SNP event is seen
int pad = PADDING_AROUND_SNPS_FOR_CALLING;
GenomeLoc trimLoc = null;
for ( final VariantContext vc : allContexts ) {
final GenomeLoc vcLoc = parser.createGenomeLoc(vc);
// only events overlapping the region's primary (unextended) span participate in trimming
if ( region.getLocation().overlapsP(vcLoc) ) {
if ( ! vc.isSNP() ) // if anything isn't a SNP use the bigger padding
pad = PADDING_AROUND_OTHERS_FOR_CALLING;
// grow trimLoc to the union of all overlapping event spans
trimLoc = trimLoc == null ? vcLoc : trimLoc.endpointSpan(vcLoc);
withinActiveRegion.add(vc);
}
}
// we don't actually have anything in the region after removing variants that don't overlap the region's full location
if ( trimLoc == null ) return null;
// the final span is the padded event span, capped at the maximum allowed genotyping extension
final GenomeLoc maxSpan = getToolkit().getGenomeLocParser().createPaddedGenomeLoc(region.getLocation(), MAX_GENOTYPING_ACTIVE_REGION_EXTENSION);
final GenomeLoc idealSpan = getToolkit().getGenomeLocParser().createPaddedGenomeLoc(trimLoc, pad);
final GenomeLoc finalSpan = maxSpan.intersect(idealSpan);
final ActiveRegion trimmedRegion = region.trim(finalSpan);
if ( DEBUG ) {
logger.info("events : " + withinActiveRegion);
logger.info("trimLoc : " + trimLoc);
logger.info("pad : " + pad);
logger.info("idealSpan : " + idealSpan);
logger.info("maxSpan : " + maxSpan);
logger.info("finalSpan : " + finalSpan);
logger.info("regionSpan : " + trimmedRegion.getExtendedLoc() + " size is " + trimmedRegion.getExtendedLoc().size());
}
return trimmedRegion;
}
/**
* Trim down the active region to just enough to properly genotype the events among the haplotypes
*
* @param originalActiveRegion our full active region
* @param haplotypes the list of haplotypes we've created from assembly
* @param fullReferenceWithPadding the reference bases over the full padded location
* @param paddedReferenceLoc the span of the reference bases
* @return an AssemblyResult containing the trimmed active region with all of the reads we should use
* trimmed down as well, and a revised set of haplotypes. If trimming failed this function
* may choose to use the originalActiveRegion without modification
*/
private AssemblyResult trimActiveRegion(final ActiveRegion originalActiveRegion,
final List<Haplotype> haplotypes,
final byte[] fullReferenceWithPadding,
final GenomeLoc paddedReferenceLoc) {
final ActiveRegion trimmedActiveRegion = createTrimmedRegion(originalActiveRegion, haplotypes, fullReferenceWithPadding, paddedReferenceLoc);
// trimming failed (no variants or nothing overlapping): fall back to the original region untouched
if ( trimmedActiveRegion == null )
return new AssemblyResult(haplotypes, originalActiveRegion, fullReferenceWithPadding, paddedReferenceLoc);
// trim down the haplotypes
// use a Set so haplotypes that become identical after trimming collapse to one
final Set<Haplotype> haplotypeSet = new HashSet<Haplotype>(haplotypes.size());
for ( final Haplotype h : haplotypes ) {
final Haplotype trimmed = h.trim(trimmedActiveRegion.getExtendedLoc());
// trim() returns null when the trimmed haplotype would start/end in an indel; drop those
if ( trimmed != null ) {
haplotypeSet.add(trimmed);
} else if ( DEBUG ) {
logger.info("Throwing out haplotype " + h + " with cigar " + h.getCigar() + " because it starts with or ends with an insertion or deletion when trimmed to " + trimmedActiveRegion.getExtendedLoc());
}
}
// create the final list of trimmed haplotypes
final List<Haplotype> trimmedHaplotypes = new ArrayList<Haplotype>(haplotypeSet);
// sort haplotypes to take full advantage of haplotype start offset optimizations in PairHMM
Collections.sort( trimmedHaplotypes, new HaplotypeBaseComparator() );
if ( DEBUG ) logger.info("Trimming haplotypes reduced number of haplotypes from " + haplotypes.size() + " to only " + trimmedHaplotypes.size());
// trim down the reads and add them to the trimmed active region
final List<GATKSAMRecord> trimmedReads = new ArrayList<GATKSAMRecord>(originalActiveRegion.getReads().size());
for( final GATKSAMRecord read : originalActiveRegion.getReads() ) {
final GATKSAMRecord clippedRead = ReadClipper.hardClipToRegion( read, trimmedActiveRegion.getExtendedLoc().getStart(), trimmedActiveRegion.getExtendedLoc().getStop() );
// keep only reads that still overlap the trimmed region and have bases left after clipping
if( trimmedActiveRegion.readOverlapsRegion(clippedRead) && clippedRead.getReadLength() > 0 ) {
trimmedReads.add(clippedRead);
}
}
// replace the region's reads with the clipped set, restored to coordinate order
trimmedActiveRegion.clearReads();
trimmedActiveRegion.addAll(ReadUtils.sortReadsByCoordinate(trimmedReads));
return new AssemblyResult(trimmedHaplotypes, trimmedActiveRegion, fullReferenceWithPadding, paddedReferenceLoc);
}
/**
* Select the best N haplotypes according to their likelihoods, if appropriate
*

View File

@ -58,7 +58,7 @@ import org.broadinstitute.sting.gatk.walkers.Reference;
import org.broadinstitute.sting.gatk.walkers.RodWalker;
import org.broadinstitute.sting.gatk.walkers.Window;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.help.HelpConstants;
import org.broadinstitute.variant.vcf.VCFHeader;
@ -360,8 +360,8 @@ public class HaplotypeResolver extends RodWalker<Integer, Integer> {
}
// order results by start position
final TreeMap<Integer, VariantContext> source1Map = new TreeMap<Integer, VariantContext>(GenotypingEngine.generateVCsFromAlignment(new Haplotype(source1Haplotype), 0, swConsensus1.getCigar(), refContext.getBases(), source1Haplotype, refContext.getWindow(), source1));
final TreeMap<Integer, VariantContext> source2Map = new TreeMap<Integer, VariantContext>(GenotypingEngine.generateVCsFromAlignment(new Haplotype(source2Haplotype), 0, swConsensus2.getCigar(), refContext.getBases(), source2Haplotype, refContext.getWindow(), source2));
final TreeMap<Integer, VariantContext> source1Map = new TreeMap<Integer, VariantContext>(GenotypingEngine.generateVCsFromAlignment(new Haplotype(source1Haplotype, false, 0, swConsensus1.getCigar()), refContext.getBases(), refContext.getWindow(), source1));
final TreeMap<Integer, VariantContext> source2Map = new TreeMap<Integer, VariantContext>(GenotypingEngine.generateVCsFromAlignment(new Haplotype(source2Haplotype, false, 0, swConsensus2.getCigar()), refContext.getBases(), refContext.getWindow(), source2));
if ( source1Map.size() == 0 || source2Map.size() == 0 ) {
// TODO -- handle errors appropriately
logger.debug("No source alleles; aborting at " + refContext.getLocus());

View File

@ -51,110 +51,31 @@ import org.apache.log4j.Logger;
import java.util.*;
/**
* generic utility function that error corrects kmers based on counts
*
* This class provides a generic facility for remapping kmers (byte[] of constant size)
* that occur infrequently to those that occur frequently, based on their simple edit distance
* as measured by mismatches.
*
* The overall workflow of using this class is simple. First, you create the class with
* parameters determining how the error correction should proceed. Next, you provide all
* of the kmers you see in your data. Once all kmers have been added, you call computeErrorCorrectionMap
to tell this class that all kmers have been added and it's time to determine error correcting
* mapping from observed kmers to corrected kmers. This correction looks for low-count (as determined
* by maxCountToCorrect) kmers and chooses the best kmer (minimizing mismatches) among those
* with at least minCountOfKmerToBeCorrection occurrences to error correct the kmer to. If
* there is no kmer with less than maxMismatchesToCorrect then the kmer will be mapped to
* null, indicating the kmer should not be used.
*
* TODO -- for ease of implementation this class uses strings instead of byte[] as those cannot
* TODO -- be added to hashmaps (more specifically, those don't implement .equals). A more efficient
* TODO -- version would use the byte[] directly
*
* TODO -- this is just not the right way to implement error correction in the graph. Basically, the
* right way to think about this is error correcting reads:
*
* *
* ACTGAT
* ACT
* CTG
* TGA
* GAT
*
* Now suppose the G is an error. What you are doing is asking for each 3mer in the read whether it's high quality
* or not. Suppose the answer is
*
* *
* ACTGAT
* ACT -- yes
* CTG -- no [CTG is unusual]
* TGA -- no [TGA is unusual]
* GAT -- yes [maybe GAT is just common, even through its an error]
*
* As we do this process it's clear how we can figure out which positions in the read likely harbor errors, and
* then go search around those bases in the read in an attempt to fix the read. We don't have to compute for
* every bad kmer its best match, as that's just not the problem we are looking to solve. We are actually
* looking for a change to a read such that all spanning kmers are well-supported. This class is being disabled
* until we implement this change.
* generic utility class that counts kmers
*
* Basically you add kmers to the counter, and it tells you how many occurrences of each kmer it's seen.
*
* User: depristo
* Date: 3/8/13
* Time: 1:16 PM
*/
public class KMerErrorCorrector {
private final static Logger logger = Logger.getLogger(KMerErrorCorrector.class);
/**
* The maximum number of bad kmer -> good kmer correction operations we'll consider doing before
* aborting for efficiency reasons. Basically, the current algorithm sucks, and is O(n^2), and
* so we cannot simply error correct 10K bad kmers against a db of 100K kmers if we ever want
* to finish running in a reasonable amount of time. This isn't worth fixing because fundamentally
* the entire error correction algorithm is just not right (i.e., it's correct but not ideal conceptually
* so we'll just fix the conceptual problem rather than the performance issue).
*/
private final static int MAX_CORRECTION_OPS_TO_ALLOW = 5000 * 1000;
public class KMerCounter {
private final static Logger logger = Logger.getLogger(KMerCounter.class);
/**
* A map from each kmer to its number of occurrences in addKmers
*/
Map<String, CountedKmer> countsByKMer = new HashMap<String, CountedKmer>();
private final Map<String, CountedKmer> countsByKMer = new HashMap<String, CountedKmer>();
private final int kmerLength;
/**
* A map from raw kmer -> error corrected kmer
*/
Map<String, String> rawToErrorCorrectedMap = null;
final int kmerLength;
final int maxCountToCorrect;
final int maxMismatchesToCorrect;
final int minCountOfKmerToBeCorrection;
/**
* Create a new kmer corrector
* Create a new kmer counter
*
* @param kmerLength the length of kmers we'll be counting to error correct, must be >= 1
* @param maxCountToCorrect kmers with < maxCountToCorrect will try to be error corrected to another kmer, must be >= 0
* @param maxMismatchesToCorrect the maximum number of mismatches between a to-be-corrected kmer and its
* best match that we attempt to error correct. If no sufficiently similar
* kmer exists, it will be remapped to null. Must be >= 1
* @param minCountOfKmerToBeCorrection the minimum count of a kmer to be considered a target for correction.
* That is, kmers that need correction will only be matched with kmers
* with at least minCountOfKmerToBeCorrection occurrences. Must be >= 1
*/
public KMerErrorCorrector(final int kmerLength,
final int maxCountToCorrect,
final int maxMismatchesToCorrect,
final int minCountOfKmerToBeCorrection) {
public KMerCounter(final int kmerLength) {
if ( kmerLength < 1 ) throw new IllegalArgumentException("kmerLength must be > 0 but got " + kmerLength);
if ( maxCountToCorrect < 0 ) throw new IllegalArgumentException("maxCountToCorrect must be >= 0 but got " + maxCountToCorrect);
if ( maxMismatchesToCorrect < 1 ) throw new IllegalArgumentException("maxMismatchesToCorrect must be >= 1 but got " + maxMismatchesToCorrect);
if ( minCountOfKmerToBeCorrection < 1 ) throw new IllegalArgumentException("minCountOfKmerToBeCorrection must be >= 1 but got " + minCountOfKmerToBeCorrection);
this.kmerLength = kmerLength;
this.maxCountToCorrect = maxCountToCorrect;
this.maxMismatchesToCorrect = maxMismatchesToCorrect;
this.minCountOfKmerToBeCorrection = minCountOfKmerToBeCorrection;
}
/**
@ -165,7 +86,17 @@ public class KMerErrorCorrector {
protected void addKmers(final String ... kmers) {
for ( final String kmer : kmers )
addKmer(kmer, 1);
computeErrorCorrectionMap();
}
/**
* Get the count of kmer in this kmer counter
* @param kmer a non-null kmer whose count to look up
* @return the non-negative count of occurrences (0 if the kmer was never added)
*/
public int getKmerCount(final byte[] kmer) {
if ( kmer == null ) throw new IllegalArgumentException("kmer cannot be null");
final CountedKmer counted = countsByKMer.get(new String(kmer));
return counted == null ? 0 : counted.count;
}
/**
@ -178,68 +109,9 @@ public class KMerErrorCorrector {
addKmer(new String(rawKmer), kmerCount);
}
/**
* Get the error corrected kmer for rawKmer
*
* @param rawKmer a kmer that was already added that we want to get an error corrected version for
* @return an error corrected kmer to use instead of rawKmer. May be == rawKmer if no error correction
* is not necessary. May be null, indicating the rawKmer shouldn't be used at all
*/
public byte[] getErrorCorrectedKmer(final byte[] rawKmer) {
final String result = getErrorCorrectedKmer(new String(rawKmer));
return result == null ? null : result.getBytes();
}
/**
* Indicate that no more kmers will be added to the kmer error corrector, so that the
error correction data structure should be computed from the added kmers. Enables calls
* to getErrorCorrectedKmer, and disable calls to addKmer.
*
* @return true if the error correction map could actually be computed, false if for any reason
* (efficiency, memory, we're out to lunch) a correction map couldn't be created.
*/
public boolean computeErrorCorrectionMap() {
if ( countsByKMer == null )
throw new IllegalStateException("computeErrorCorrectionMap can only be called once");
final LinkedList<CountedKmer> needsCorrection = new LinkedList<CountedKmer>();
final List<CountedKmer> goodKmers = new ArrayList<CountedKmer>(countsByKMer.size());
rawToErrorCorrectedMap = new HashMap<String, String>(countsByKMer.size());
for ( final CountedKmer countedKmer: countsByKMer.values() ) {
if ( countedKmer.count <= maxCountToCorrect )
needsCorrection.add(countedKmer);
else {
// todo -- optimization could make not in map mean ==
rawToErrorCorrectedMap.put(countedKmer.kmer, countedKmer.kmer);
// only allow corrections to kmers with at least this count
if ( countedKmer.count >= minCountOfKmerToBeCorrection )
goodKmers.add(countedKmer);
}
}
// cleanup memory -- we don't need the counts for each kmer any longer
countsByKMer = null;
if ( goodKmers.size() * needsCorrection.size() > MAX_CORRECTION_OPS_TO_ALLOW )
return false;
else {
Collections.sort(goodKmers);
for ( final CountedKmer toCorrect : needsCorrection ) {
final String corrected = findClosestKMer(toCorrect, goodKmers);
rawToErrorCorrectedMap.put(toCorrect.kmer, corrected);
}
return true;
}
}
protected void addKmer(final String rawKmer, final int kmerCount) {
if ( rawKmer.length() != kmerLength ) throw new IllegalArgumentException("bad kmer length " + rawKmer + " expected size " + kmerLength);
if ( kmerCount < 0 ) throw new IllegalArgumentException("bad kmerCount " + kmerCount);
if ( countsByKMer == null ) throw new IllegalStateException("Cannot add kmers to an already finalized error corrector");
CountedKmer countFromMap = countsByKMer.get(rawKmer);
if ( countFromMap == null ) {
@ -249,55 +121,10 @@ public class KMerErrorCorrector {
countFromMap.count += kmerCount;
}
protected String findClosestKMer(final CountedKmer kmer, final Collection<CountedKmer> goodKmers) {
String bestMatch = null;
int minMismatches = Integer.MAX_VALUE;
for ( final CountedKmer goodKmer : goodKmers ) {
final int mismatches = countMismatches(kmer.kmer, goodKmer.kmer, minMismatches);
if ( mismatches < minMismatches ) {
minMismatches = mismatches;
bestMatch = goodKmer.kmer;
}
// if we find an edit-distance 1 result, abort early, as we know there can be no edit distance 0 results
if ( mismatches == 1 )
break;
}
return minMismatches > maxMismatchesToCorrect ? null : bestMatch;
}
protected int countMismatches(final String one, final String two, final int currentBest) {
int mismatches = 0;
for ( int i = 0; i < one.length(); i++ ) {
mismatches += one.charAt(i) == two.charAt(i) ? 0 : 1;
if ( mismatches > currentBest )
break;
if ( mismatches > maxMismatchesToCorrect )
return Integer.MAX_VALUE;
}
return mismatches;
}
protected String getErrorCorrectedKmer(final String rawKmer) {
if ( rawToErrorCorrectedMap == null ) throw new IllegalStateException("Cannot get error corrected kmers until after computeErrorCorrectionMap has been called");
if ( rawKmer.length() != kmerLength ) throw new IllegalArgumentException("bad kmer length " + rawKmer + " expected size " + kmerLength);
return rawToErrorCorrectedMap.get(rawKmer);
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder("KMerErrorCorrector{");
if ( rawToErrorCorrectedMap == null ) {
b.append("counting ").append(countsByKMer.size()).append(" distinct kmers");
} else {
for ( Map.Entry<String, String> toCorrect : rawToErrorCorrectedMap.entrySet() ) {
final boolean correcting = ! toCorrect.getKey().equals(toCorrect.getValue());
if ( correcting )
b.append(String.format("%n\tCorrecting %s -> %s", toCorrect.getKey(), toCorrect.getValue()));
}
}
final StringBuilder b = new StringBuilder("KMerCounter{");
b.append("counting ").append(countsByKMer.size()).append(" distinct kmers");
b.append("\n}");
return b.toString();
}

View File

@ -50,7 +50,7 @@ import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
@ -69,6 +69,14 @@ public class LikelihoodCalculationEngine {
private final byte constantGCP;
private final boolean DEBUG;
private final PairHMM pairHMM;
private final int minReadLength = 20;
/**
* The expected rate of random sequencing errors for a read originating from its true haplotype.
*
* For example, if this is 0.01, then we'd expect 1 error per 100 bp.
*/
private final double EXPECTED_ERROR_RATE_PER_BASE = 0.02;
public LikelihoodCalculationEngine( final byte constantGCP, final boolean debug, final PairHMM.HMM_IMPLEMENTATION hmmType ) {
@ -90,9 +98,16 @@ public class LikelihoodCalculationEngine {
DEBUG = debug;
}
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods( final List<Haplotype> haplotypes, final Map<String, List<GATKSAMRecord>> perSampleReadList ) {
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = new HashMap<String, PerReadAlleleLikelihoodMap>();
/**
* Initialize our pairHMM with parameters appropriate to the haplotypes and reads we're going to evaluate
*
* After calling this routine the PairHMM will be configured to best evaluate all reads in the samples
* against the set of haplotypes
*
* @param haplotypes a non-null list of haplotypes
* @param perSampleReadList a mapping from sample -> reads
*/
private void initializePairHMM(final List<Haplotype> haplotypes, final Map<String, List<GATKSAMRecord>> perSampleReadList) {
int X_METRIC_LENGTH = 0;
for( final Map.Entry<String, List<GATKSAMRecord>> sample : perSampleReadList.entrySet() ) {
for( final GATKSAMRecord read : sample.getValue() ) {
@ -108,13 +123,27 @@ public class LikelihoodCalculationEngine {
// initialize arrays to hold the probabilities of being in the match, insertion and deletion cases
pairHMM.initialize(X_METRIC_LENGTH, Y_METRIC_LENGTH);
}
// for each sample's reads
public Map<String, PerReadAlleleLikelihoodMap> computeReadLikelihoods( final List<Haplotype> haplotypes, final Map<String, List<GATKSAMRecord>> perSampleReadList ) {
// configure the HMM
initializePairHMM(haplotypes, perSampleReadList);
// Add likelihoods for each sample's reads to our stratifiedReadMap
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap = new HashMap<String, PerReadAlleleLikelihoodMap>();
for( final Map.Entry<String, List<GATKSAMRecord>> sampleEntry : perSampleReadList.entrySet() ) {
//if( DEBUG ) { System.out.println("Evaluating sample " + sample + " with " + perSampleReadList.get( sample ).size() + " passing reads"); }
// evaluate the likelihood of the reads given those haplotypes
stratifiedReadMap.put(sampleEntry.getKey(), computeReadLikelihoods(haplotypes, sampleEntry.getValue()));
final PerReadAlleleLikelihoodMap map = computeReadLikelihoods(haplotypes, sampleEntry.getValue());
final List<GATKSAMRecord> removedReads = map.filterPoorlyModelledReads(EXPECTED_ERROR_RATE_PER_BASE);
// logger.info("Removed " + removedReads.size() + " reads because of bad likelihoods from sample " + sampleEntry.getKey());
// for ( final GATKSAMRecord read : removedReads )
// logger.info("\tRemoved " + read.getReadName());
stratifiedReadMap.put(sampleEntry.getKey(), map);
}
return stratifiedReadMap;
}
@ -128,6 +157,10 @@ public class LikelihoodCalculationEngine {
final PerReadAlleleLikelihoodMap perReadAlleleLikelihoodMap = new PerReadAlleleLikelihoodMap();
for( final GATKSAMRecord read : reads ) {
if ( read.getReadLength() < minReadLength )
// don't consider any reads that have a read length < the minimum
continue;
final byte[] overallGCP = new byte[read.getReadLength()];
Arrays.fill( overallGCP, constantGCP ); // Is there a way to derive empirical estimates for this from the data?
// NOTE -- must clone anything that gets modified here so we don't screw up future uses of the read
@ -151,6 +184,7 @@ public class LikelihoodCalculationEngine {
perReadAlleleLikelihoodMap.add(read, alleleVersions.get(haplotype), log10l);
}
}
return perReadAlleleLikelihoodMap;
}
@ -158,17 +192,17 @@ public class LikelihoodCalculationEngine {
@Ensures({"result.length == result[0].length", "result.length == alleleOrdering.size()"})
public static double[][] computeDiploidHaplotypeLikelihoods( final String sample,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap,
final List<Allele> alleleOrdering ) {
final TreeSet<String> sampleSet = new TreeSet<String>();
sampleSet.add(sample);
return computeDiploidHaplotypeLikelihoods(sampleSet, stratifiedReadMap, alleleOrdering);
final List<Allele> alleleOrdering,
final boolean normalize ) {
return computeDiploidHaplotypeLikelihoods(Collections.singleton(sample), stratifiedReadMap, alleleOrdering, normalize);
}
@Requires({"alleleOrdering.size() > 0"})
@Ensures({"result.length == result[0].length", "result.length == alleleOrdering.size()"})
public static double[][] computeDiploidHaplotypeLikelihoods( final Set<String> samples,
final Map<String, PerReadAlleleLikelihoodMap> stratifiedReadMap,
final List<Allele> alleleOrdering ) {
final List<Allele> alleleOrdering,
final boolean normalize) {
final int numHaplotypes = alleleOrdering.size();
final double[][] haplotypeLikelihoodMatrix = new double[numHaplotypes][numHaplotypes];
@ -195,7 +229,7 @@ public class LikelihoodCalculationEngine {
}
// normalize the diploid likelihoods matrix
return normalizeDiploidLikelihoodMatrixFromLog10( haplotypeLikelihoodMatrix );
return normalize ? normalizeDiploidLikelihoodMatrixFromLog10( haplotypeLikelihoodMatrix ) : haplotypeLikelihoodMatrix;
}
@Requires({"likelihoodMatrix.length == likelihoodMatrix[0].length"})
@ -230,7 +264,7 @@ public class LikelihoodCalculationEngine {
final List<Allele> haplotypesAsAlleles = new ArrayList<Allele>();
for( final Haplotype h : haplotypes ) { haplotypesAsAlleles.add(Allele.create(h, true)); }
final double[][] haplotypeLikelihoodMatrix = computeDiploidHaplotypeLikelihoods( sampleKeySet, stratifiedReadMap, haplotypesAsAlleles ); // all samples pooled together
final double[][] haplotypeLikelihoodMatrix = computeDiploidHaplotypeLikelihoods( sampleKeySet, stratifiedReadMap, haplotypesAsAlleles, true ); // all samples pooled together
int hap1 = 0;
int hap2 = 0;

View File

@ -47,7 +47,7 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.variant.variantcontext.VariantContext;

View File

@ -137,6 +137,30 @@ public class BaseGraph<T extends BaseVertex> extends DefaultDirectedGraph<T, Bas
return outDegreeOf(v) == 0;
}
/**
* Get the set of source vertices of this graph
* @return a non-null set
*/
public Set<T> getSources() {
final Set<T> set = new LinkedHashSet<T>();
for ( final T v : vertexSet() )
if ( isSource(v) )
set.add(v);
return set;
}
/**
* Get the set of sink vertices of this graph
* @return a non-null set
*/
public Set<T> getSinks() {
final Set<T> set = new LinkedHashSet<T>();
for ( final T v : vertexSet() )
if ( isSink(v) )
set.add(v);
return set;
}
/**
* Pull out the additional sequence implied by traversing this node in the graph
* @param v the vertex from which to pull out the additional base sequence

View File

@ -58,6 +58,8 @@ import java.util.Arrays;
*/
public class BaseVertex {
final byte[] sequence;
private final static int UNASSIGNED_HASHCODE = -1;
int cachedHashCode = UNASSIGNED_HASHCODE;
/**
* Create a new sequence vertex with sequence
@ -128,8 +130,10 @@ public class BaseVertex {
*/
@Override
public int hashCode() {
// TODO -- optimization, could compute upfront once and cached in debruijn graph
return Arrays.hashCode(sequence);
if ( cachedHashCode == UNASSIGNED_HASHCODE ) {
cachedHashCode = Arrays.hashCode(sequence);
}
return cachedHashCode;
}
@Override

View File

@ -48,7 +48,6 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Requires;
import java.io.File;
import java.util.*;
/**
@ -177,9 +176,9 @@ public class CommonSuffixSplitter {
*/
@Requires("!middleVertices.isEmpty()")
protected static SeqVertex commonSuffix(final Collection<SeqVertex> middleVertices) {
final List<byte[]> kmers = Utils.getKmers(middleVertices);
final int min = Utils.minKmerLength(kmers);
final int suffixLen = Utils.compSuffixLen(kmers, min);
final List<byte[]> kmers = GraphUtils.getKmers(middleVertices);
final int min = GraphUtils.minKmerLength(kmers);
final int suffixLen = GraphUtils.compSuffixLen(kmers, min);
final byte[] kmer = kmers.get(0);
final byte[] suffix = Arrays.copyOfRange(kmer, kmer.length - suffixLen, kmer.length);
return new SeqVertex(suffix);

View File

@ -47,7 +47,6 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import com.google.java.contract.Ensures;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.KMerErrorCorrector;
import java.util.Arrays;
import java.util.HashMap;
@ -59,7 +58,7 @@ import java.util.Map;
* User: rpoplin
* Date: 2/6/13
*/
public class DeBruijnGraph extends BaseGraph<DeBruijnVertex> {
public final class DeBruijnGraph extends BaseGraph<DeBruijnVertex> {
/**
* Create an empty DeBruijnGraph with default kmer size
*/

View File

@ -54,7 +54,7 @@ import com.google.java.contract.Ensures;
* User: ebanks, mdepristo
* Date: Mar 23, 2011
*/
public class DeBruijnVertex extends BaseVertex {
public final class DeBruijnVertex extends BaseVertex {
private final static byte[][] sufficesAsByteArray = new byte[256][];
static {
for ( int i = 0; i < sufficesAsByteArray.length; i++ )

View File

@ -60,8 +60,8 @@ import java.util.List;
* Date: 3/25/13
* Time: 9:42 PM
*/
final class Utils {
private Utils() {}
final class GraphUtils {
private GraphUtils() {}
/**
* Compute the maximum shared prefix length of list of bytes.

View File

@ -50,10 +50,7 @@ import com.google.common.collect.MinMaxPriorityQueue;
import com.google.java.contract.Ensures;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.*;
/**
* Class for finding the K best paths (as determined by the sum of multiplicities of the edges) in a graph.
@ -63,7 +60,23 @@ import java.util.List;
* Date: Mar 23, 2011
*/
public class KBestPaths<T extends BaseVertex> {
public KBestPaths() { }
private final boolean allowCycles;
/**
* Create a new KBestPaths finder that follows cycles in the graph
*/
public KBestPaths() {
this(true);
}
/**
* Create a new KBestPaths finder
*
* @param allowCycles should we allow paths that follow cycles in the graph?
*/
public KBestPaths(final boolean allowCycles) {
this.allowCycles = allowCycles;
}
protected static class MyInt { public int val = 0; }
@ -78,31 +91,61 @@ public class KBestPaths<T extends BaseVertex> {
}
/**
* @see #getKBestPaths(BaseGraph, int) retriving the first 1000 paths
* @see #getKBestPaths(BaseGraph, int) retrieving the best 1000 paths
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph ) {
return getKBestPaths(graph, 1000);
}
/**
* Traverse the graph and pull out the best k paths.
* Paths are scored via their comparator function. The default being PathComparatorTotalScore()
* @param graph the graph from which to pull paths
* @param k the number of paths to find
* @return a list with at most k top-scoring paths from the graph
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) retrieving the first 1000 paths
* starting from all source vertices and ending with all sink vertices
*/
@Ensures({"result != null", "result.size() <= k"})
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k ) {
return getKBestPaths(graph, k, graph.getSources(), graph.getSinks());
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with k=1000
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final Set<T> sources, final Set<T> sinks ) {
return getKBestPaths(graph, 1000, sources, sinks);
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with k=1000
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final T source, final T sink ) {
return getKBestPaths(graph, 1000, source, sink);
}
/**
* @see #getKBestPaths(BaseGraph, int, java.util.Set, java.util.Set) with singleton source and sink sets
*/
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k, final T source, final T sink ) {
return getKBestPaths(graph, k, Collections.singleton(source), Collections.singleton(sink));
}
/**
* Traverse the graph and pull out the best k paths.
* Paths are scored via their comparator function. The default being PathComparatorTotalScore()
* @param graph the graph from which to pull paths
* @param k the number of paths to find
* @param sources a set of vertices we want to start paths with
* @param sinks a set of vertices we want to end paths with
* @return a list with at most k top-scoring paths from the graph
*/
@Ensures({"result != null", "result.size() <= k"})
public List<Path<T>> getKBestPaths( final BaseGraph<T> graph, final int k, final Set<T> sources, final Set<T> sinks ) {
if( graph == null ) { throw new IllegalArgumentException("Attempting to traverse a null graph."); }
// a min max queue that will collect the best k paths
final MinMaxPriorityQueue<Path<T>> bestPaths = MinMaxPriorityQueue.orderedBy(new PathComparatorTotalScore()).maximumSize(k).create();
// run a DFS for best paths
for ( final T v : graph.vertexSet() ) {
if ( graph.inDegreeOf(v) == 0 ) {
findBestPaths(new Path<T>(v, graph), bestPaths, new MyInt());
}
for ( final T source : sources ) {
final Path<T> startingPath = new Path<T>(source, graph);
findBestPaths(startingPath, sinks, bestPaths, new MyInt());
}
// the MinMaxPriorityQueue iterator returns items in an arbitrary order, so we need to sort the final result
@ -111,9 +154,15 @@ public class KBestPaths<T extends BaseVertex> {
return toReturn;
}
private void findBestPaths( final Path<T> path, final MinMaxPriorityQueue<Path<T>> bestPaths, final MyInt n ) {
// did we hit the end of a path?
if ( allOutgoingEdgesHaveBeenVisited(path) ) {
/**
* Recursive algorithm to find the K best paths in the graph from the current path to any of the sinks
* @param path the current path progress
* @param sinks a set of nodes that are sinks. Will terminate and add a path if the last vertex of path is in this set
* @param bestPaths a collection into which completed paths are added.
* @param n used to limit the search by tracking the number of vertices visited across all paths
*/
private void findBestPaths( final Path<T> path, final Set<T> sinks, final Collection<Path<T>> bestPaths, final MyInt n ) {
if ( sinks.contains(path.getLastVertex())) {
bestPaths.add(path);
} else if( n.val > 10000 ) {
// do nothing, just return, as we've done too much work already
@ -122,31 +171,15 @@ public class KBestPaths<T extends BaseVertex> {
final ArrayList<BaseEdge> edgeArrayList = new ArrayList<BaseEdge>(path.getOutgoingEdgesOfLastVertex());
Collections.sort(edgeArrayList, new BaseEdge.EdgeWeightComparator());
for ( final BaseEdge edge : edgeArrayList ) {
final T target = path.getGraph().getEdgeTarget(edge);
// make sure the edge is not already in the path
if ( path.containsEdge(edge) )
continue;
final Path<T> newPath = new Path<T>(path, edge);
n.val++;
findBestPaths(newPath, bestPaths, n);
final boolean alreadyVisited = allowCycles ? path.containsEdge(edge) : path.containsVertex(target);
if ( ! alreadyVisited ) {
final Path<T> newPath = new Path<T>(path, edge);
n.val++;
findBestPaths(newPath, sinks, bestPaths, n);
}
}
}
}
/**
* Have all of the outgoing edges of the final vertex been visited?
*
* I.e., are all outgoing vertices of the current path in the list of edges of the graph?
*
* @param path the path to test
* @return true if all the outgoing edges at the end of this path have already been visited
*/
private boolean allOutgoingEdgesHaveBeenVisited( final Path<T> path ) {
for( final BaseEdge edge : path.getOutgoingEdgesOfLastVertex() ) {
if( !path.containsEdge(edge) ) { // TODO -- investigate allowing numInPath < 2 to allow cycles
return false;
}
}
return true;
}
}
}

View File

@ -148,6 +148,19 @@ public class Path<T extends BaseVertex> {
return edgesAsSet.contains(edge);
}
/**
* Does this path contain the given vertex?
*
* @param v a non-null vertex
* @return true if v occurs within this path, false otherwise
*/
public boolean containsVertex(final T v) {
if ( v == null ) throw new IllegalArgumentException("Vertex cannot be null");
// TODO -- warning this is expensive. Need to do vertex caching
return getVertices().contains(v);
}
/**
* Check that two paths have the same edges and total score
* @param path the other path we might be the same as

View File

@ -46,10 +46,13 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import org.apache.commons.lang.ArrayUtils;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import java.io.File;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
/**
@ -58,9 +61,16 @@ import java.util.Set;
* @author: depristo
* @since 03/2013
*/
public class SeqGraph extends BaseGraph<SeqVertex> {
public final class SeqGraph extends BaseGraph<SeqVertex> {
private final static boolean PRINT_SIMPLIFY_GRAPHS = false;
private final static int MIN_SUFFIX_TO_MERGE_TAILS = 5;
/**
* The minimum number of common bp from the prefix (head merging) or suffix (tail merging)
* required before we'll merge in such configurations. A large value here is critical to avoid
* merging inappropriate head or tail nodes, which introduces large insertion / deletion events
* as the merge operation creates a link among the non-linked sink / source vertices
*/
protected final static int MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES = 10;
/**
* Construct an empty SeqGraph
@ -100,15 +110,15 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
//logger.info("simplifyGraph iteration " + i);
// iterate until we don't do anything useful
didSomeWork = false;
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".dot"), 0);
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".1.dot"), 0);
didSomeWork |= new MergeDiamonds().transformUntilComplete();
didSomeWork |= new MergeTails().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".diamonds_and_tails.dot"), 0);
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".2.diamonds_and_tails.dot"), 0);
didSomeWork |= new SplitCommonSuffices().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".split_suffix.dot"), 0);
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".3.split_suffix.dot"), 0);
didSomeWork |= new MergeCommonSuffices().transformUntilComplete();
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".merge_suffix.dot"), 0);
if ( PRINT_SIMPLIFY_GRAPHS ) printGraph(new File("simplifyGraph." + i + ".4.merge_suffix.dot"), 0);
didSomeWork |= new MergeHeadlessIncomingSources().transformUntilComplete();
didSomeWork |= zipLinearChains();
@ -118,18 +128,8 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
/**
* Zip up all of the simple linear chains present in this graph.
*/
public boolean zipLinearChains() {
boolean foundOne = false;
while( zipOneLinearChain() ) {
// just keep going until zipOneLinearChain says its done
foundOne = true;
}
return foundOne;
}
/**
* Merge together two vertices in the graph v1 -> v2 into a single vertex v' containing v1 + v2 sequence
*
* Merges together all pairs of vertices in the graph v1 -> v2 into a single vertex v' containing v1 + v2 sequence
*
* Only works on vertices where v1's only outgoing edge is to v2 and v2's only incoming edge is from v1.
*
@ -137,44 +137,153 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
*
* @return true if any such pair of vertices could be found, false otherwise
*/
protected boolean zipOneLinearChain() {
for( final BaseEdge e : edgeSet() ) {
final SeqVertex outgoingVertex = getEdgeTarget(e);
final SeqVertex incomingVertex = getEdgeSource(e);
if( !outgoingVertex.equals(incomingVertex)
&& outDegreeOf(incomingVertex) == 1 && inDegreeOf(outgoingVertex) == 1
&& isReferenceNode(incomingVertex) == isReferenceNode(outgoingVertex) ) {
final Set<BaseEdge> outEdges = outgoingEdgesOf(outgoingVertex);
final Set<BaseEdge> inEdges = incomingEdgesOf(incomingVertex);
final BaseEdge singleOutEdge = outEdges.isEmpty() ? null : outEdges.iterator().next();
final BaseEdge singleInEdge = inEdges.isEmpty() ? null : inEdges.iterator().next();
if( inEdges.size() == 1 && outEdges.size() == 1 ) {
singleInEdge.setMultiplicity( singleInEdge.getMultiplicity() + ( e.getMultiplicity() / 2 ) );
singleOutEdge.setMultiplicity( singleOutEdge.getMultiplicity() + ( e.getMultiplicity() / 2 ) );
} else if( inEdges.size() == 1 ) {
singleInEdge.setMultiplicity( Math.max(singleInEdge.getMultiplicity() + ( e.getMultiplicity() - 1 ), 0) );
} else if( outEdges.size() == 1 ) {
singleOutEdge.setMultiplicity( Math.max( singleOutEdge.getMultiplicity() + ( e.getMultiplicity() - 1 ), 0) );
}
final SeqVertex addedVertex = new SeqVertex( ArrayUtils.addAll(incomingVertex.getSequence(), outgoingVertex.getSequence()) );
addVertex(addedVertex);
for( final BaseEdge edge : outEdges ) {
addEdge(addedVertex, getEdgeTarget(edge), new BaseEdge(edge.isRef(), edge.getMultiplicity()));
}
for( final BaseEdge edge : inEdges ) {
addEdge(getEdgeSource(edge), addedVertex, new BaseEdge(edge.isRef(), edge.getMultiplicity()));
}
removeVertex(incomingVertex);
removeVertex(outgoingVertex);
return true;
}
/**
 * Zip up all of the simple linear chains present in this graph.
 *
 * Finds every vertex that could start a linear chain, traces each chain forward with
 * traceLinearChain, and merges each zippable chain into a single combined vertex.
 *
 * @return true if at least one chain was actually merged, false otherwise
 */
public boolean zipLinearChains() {
    // create the list of start sites [doesn't modify graph yet]
    final List<SeqVertex> zipStarts = new LinkedList<SeqVertex>();
    for ( final SeqVertex source : vertexSet() ) {
        if ( isLinearChainStart(source) )
            zipStarts.add(source);
    }

    // BUGFIX: removed a stray unconditional "return false;" here that made all of the
    // chain-merging code below unreachable
    if ( zipStarts.isEmpty() ) // nothing to do, as nothing could start a chain
        return false;

    // At this point, zipStarts contains all of the vertices in this graph that might start some linear
    // chain of vertices.  We walk through each start, building up the linear chain of vertices and then
    // zipping them up with mergeLinearChain, if possible
    boolean mergedOne = false;
    for ( final SeqVertex zipStart : zipStarts ) {
        final LinkedList<SeqVertex> linearChain = traceLinearChain(zipStart);

        // merge the linearized chain, recording if we actually did some useful work
        mergedOne |= mergeLinearChain(linearChain);
    }

    return mergedOne;
}
/**
 * Is source vertex potentially a start of a linear chain of vertices?
 *
 * A vertex can start a zip chain when its out-degree is exactly 1 and either (a) it does not
 * have exactly one incoming vertex (zero, or two or more, so a chain must begin here), or
 * (b) its single incoming vertex has out-degree > 1, so that predecessor could not itself
 * have started the chain.
 *
 * @param source a non-null vertex
 * @return true if source might start a linear chain
 */
@Requires("source != null")
private boolean isLinearChainStart(final SeqVertex source) {
    if ( outDegreeOf(source) != 1 )
        return false; // chains are only traced through single outgoing edges

    if ( inDegreeOf(source) != 1 )
        return true; // no unique predecessor, so any chain must start at source itself

    // exactly one incoming vertex: source starts the chain only if that predecessor branches
    final SeqVertex predecessor = incomingVerticesOf(source).iterator().next();
    return outDegreeOf(predecessor) > 1;
}
/**
 * Get all of the vertices in a linear chain of vertices starting at zipStart
 *
 * Builds an ordered list of vertices beginning with zipStart such that every sequential pair
 * of vertices A and B in the list can be zipped together.
 *
 * @param zipStart a vertex that starts a linear chain
 * @return a list of vertices that comprise a linear chain starting with zipStart.  The resulting
 *         list always contains at least zipStart as its first element.
 */
@Requires("isLinearChainStart(zipStart)")
@Ensures({"result != null", "result.size() >= 1"})
private LinkedList<SeqVertex> traceLinearChain(final SeqVertex zipStart) {
    final LinkedList<SeqVertex> chain = new LinkedList<SeqVertex>();
    chain.add(zipStart);

    SeqVertex current = zipStart;
    boolean currentIsRef = isReferenceNode(zipStart); // cached, because this calculation is expensive

    // keep extending while current has exactly one outgoing edge to follow
    while ( outDegreeOf(current) == 1 ) {
        // there can only be one outgoing edge of current, by the loop condition
        final SeqVertex next = getEdgeTarget(outgoingEdgeOf(current));

        // stop at a merge point (multiple incoming edges) or a self-cycle back to current
        if ( inDegreeOf(next) != 1 || current.equals(next) )
            break;

        // both vertices must share the same reference status to be zipped together
        final boolean nextIsRef = isReferenceNode(next);
        if ( currentIsRef != nextIsRef )
            break;

        chain.add(next); // extend our chain by one

        // advance our cursor and continue
        current = next;
        currentIsRef = nextIsRef;
    }

    return chain;
}
/**
 * Merge a linear chain of vertices into a single combined vertex, and update this graph such that
 * the incoming edges into the first element of the chain and the outgoing edges from the last
 * element all point to the new combined vertex.
 *
 * @param linearChain a non-empty chain of vertices that can be zipped up into a single vertex
 * @return true if we actually merged at least two vertices together
 * @throws IllegalArgumentException if linearChain is empty
 */
protected boolean mergeLinearChain(final LinkedList<SeqVertex> linearChain) {
    if ( linearChain.isEmpty() )
        throw new IllegalArgumentException("BUG: cannot have linear chain with 0 elements but got " + linearChain);

    final SeqVertex first = linearChain.getFirst();
    final SeqVertex last = linearChain.getLast();
    if ( first == last )
        return false; // a single-element chain cannot be zipped

    // build the combined vertex whose sequence is the concatenation of the chain's sequences
    // TODO -- performance problem -- can be optimized if we want
    final List<byte[]> seqs = new LinkedList<byte[]>();
    for ( final SeqVertex v : linearChain )
        seqs.add(v.getSequence());
    final byte[] seqsCat = org.broadinstitute.sting.utils.Utils.concat(seqs.toArray(new byte[][]{}));
    final SeqVertex addedVertex = new SeqVertex( seqsCat );
    addVertex(addedVertex);

    // distribute the chain's total internal edge weight evenly across all boundary edges
    final Set<BaseEdge> inEdges = incomingEdgesOf(first);
    final Set<BaseEdge> outEdges = outgoingEdgesOf(last);
    final int nEdges = inEdges.size() + outEdges.size();
    final int sharedWeightAmongEdges = nEdges == 0 ? 0 : sumEdgeWeightAlongChain(linearChain) / nEdges;
    final BaseEdge template = new BaseEdge(false, sharedWeightAmongEdges); // template to make the .add function call easy

    // rewire the boundary edges so they point at the new combined vertex
    for ( final BaseEdge edge : outEdges )
        addEdge(addedVertex, getEdgeTarget(edge), new BaseEdge(edge).add(template));
    for ( final BaseEdge edge : inEdges )
        addEdge(getEdgeSource(edge), addedVertex, new BaseEdge(edge).add(template));

    removeAllVertices(linearChain);
    return true;
}
/**
 * Get the sum of the edge weights on a linear chain of at least 2 elements
 *
 * @param chain a linear chain of vertices with at least 2 vertices
 * @return the sum of the multiplicities along all edges connecting vertices within the chain
 * @throws IllegalStateException if two adjacent chain vertices are not connected by an edge
 */
@Requires({"chain != null", "chain.size() >= 2"})
private int sumEdgeWeightAlongChain(final LinkedList<SeqVertex> chain) {
    int totalMultiplicity = 0;
    SeqVertex previous = null;
    for ( final SeqVertex current : chain ) {
        if ( previous == null ) {
            // the first vertex has no incoming chain edge to count
            previous = current;
            continue;
        }
        final BaseEdge connecting = getEdge(previous, current);
        if ( connecting == null )
            throw new IllegalStateException("Something wrong with the linear chain, got a null edge between " + previous + " and " + current);
        totalMultiplicity += connecting.getMultiplicity();
        previous = current;
    }
    return totalMultiplicity;
}
/**
@ -273,7 +382,10 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
// actually do the merging, returning true if at least 1 base was successfully split
final SharedVertexSequenceSplitter splitter = new SharedVertexSequenceSplitter(SeqGraph.this, middles);
return splitter.splitAndUpdate(top, bottom, 1);
if (splitter.meetsMinMergableSequenceForEitherPrefixOrSuffix(1))
return splitter.splitAndUpdate(top, bottom);
else
return false;
}
}
@ -306,7 +418,11 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
if ( dontModifyGraphEvenIfPossible() ) return true;
final SharedVertexSequenceSplitter splitter = new SharedVertexSequenceSplitter(SeqGraph.this, tails);
return splitter.splitAndUpdate(top, null, MIN_SUFFIX_TO_MERGE_TAILS);
if (splitter.meetsMinMergableSequenceForSuffix(MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES))
return splitter.splitAndUpdate(top, null);
else
return false;
}
}
@ -390,7 +506,10 @@ public class SeqGraph extends BaseGraph<SeqVertex> {
if ( dontModifyGraphEvenIfPossible() ) return true;
final SharedVertexSequenceSplitter splitter = new SharedVertexSequenceSplitter(SeqGraph.this, incoming);
return splitter.splitAndUpdate(null, bottom, 1);
if (splitter.meetsMinMergableSequenceForPrefix(MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES))
return splitter.splitAndUpdate(null, bottom);
else
return false;
}
}
}

View File

@ -70,7 +70,7 @@ import java.util.Arrays;
* @author: depristo
* @since 03/2013
*/
public class SeqVertex extends BaseVertex {
public final class SeqVertex extends BaseVertex {
private static int idCounter = 0;
public final int id;

View File

@ -133,6 +133,14 @@ public class SharedVertexSequenceSplitter {
suffixV = prefixAndSuffix.getSecond();
}
/**
 * Given sequences that are all equal, does this splitter make those into prefix or suffix nodes?
 *
 * @return true if we merge fully-equal nodes into prefix nodes, false if into suffix nodes
 */
protected static boolean prefersPrefixMerging() {
    return true;
}
/**
 * Simple single-function interface to split and then update a graph
 *
 * Note that this method always performs the split; callers that want to avoid splitting on
 * trivial amounts of common sequence should first check one of the
 * meetsMinMergableSequenceFor* predicates.
 *
 * @param top the top vertex, may be null
 * @param bottom the bottom vertex, may be null
 * @return true, as the split and graph update are always performed
 */
public boolean splitAndUpdate(final SeqVertex top, final SeqVertex bottom) {
    split();
    updateGraph(top, bottom);
    return true;
}
/**
 * Does either the common suffix or the common prefix have at least minCommonSequence bases in it?
 *
 * @param minCommonSequence a minimum length of the common sequence, must be >= 0
 * @return true if either the suffix or the prefix length is >= minCommonSequence
 */
public boolean meetsMinMergableSequenceForEitherPrefixOrSuffix(final int minCommonSequence) {
    final boolean prefixIsLongEnough = meetsMinMergableSequenceForPrefix(minCommonSequence);
    return prefixIsLongEnough || meetsMinMergableSequenceForSuffix(minCommonSequence);
}
/**
 * Does the common prefix have at least minCommonSequence bases in it?
 *
 * @param minCommonSequence a minimum length of the common sequence, must be >= 0
 * @return true if the prefix length is >= minCommonSequence
 */
public boolean meetsMinMergableSequenceForPrefix(final int minCommonSequence) {
    final int prefixLength = prefixV.length();
    return prefixLength >= minCommonSequence;
}
/**
 * Does the common suffix have at least minCommonSequence bases in it?
 *
 * @param minCommonSequence a minimum length of the common sequence, must be >= 0
 * @return true if the suffix length is >= minCommonSequence
 */
public boolean meetsMinMergableSequenceForSuffix(final int minCommonSequence) {
    final int suffixLength = suffixV.length();
    return suffixLength >= minCommonSequence;
}
/**
* Actually do the splitting up of the vertices
*
@ -266,8 +295,8 @@ public class SharedVertexSequenceSplitter {
min = Math.min(min, v.getSequence().length);
}
final int prefixLen = Utils.compPrefixLen(kmers, min);
final int suffixLen = Utils.compSuffixLen(kmers, min - prefixLen);
final int prefixLen = GraphUtils.compPrefixLen(kmers, min);
final int suffixLen = GraphUtils.compSuffixLen(kmers, min - prefixLen);
final byte[] kmer = kmers.get(0);
final byte[] prefix = Arrays.copyOfRange(kmer, 0, prefixLen);

View File

@ -47,7 +47,7 @@
package org.broadinstitute.sting.gatk.walkers.indels;
import net.sf.samtools.SAMRecord;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.QualityUtils;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;

View File

@ -48,7 +48,7 @@ package org.broadinstitute.sting.gatk.walkers.indels;
import com.google.java.contract.Ensures;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.clipping.ReadClipper;
import org.broadinstitute.sting.utils.exceptions.UserException;

View File

@ -0,0 +1,194 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import com.google.java.contract.Requires;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.LikelihoodCalculationEngine;
import org.broadinstitute.sting.utils.MathUtils;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.util.*;
/**
* Computes the likelihood based probability that haplotypes for first and second variant contexts
* only appear in their fully linked form (x11 and x22) given a set of haplotypes where they might occur
* and read likelihoods per sample
*
* User: depristo
* Date: 3/29/13
* Time: 9:23 AM
*/
public class HaplotypeLDCalculator {
private final List<Haplotype> haplotypes;
private final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap;
private List<Map<Haplotype, Double>> haplotypeLikelihoodsPerSample = null;
// linear contingency table with table[0] == [0][0], table[1] = [0][1], table[2] = [1][0], table[3] = [1][1]
private final double[] table = new double[4];
/**
 * For testing only: builds a calculator with no haplotypes and no per-sample read map.
 */
protected HaplotypeLDCalculator() {
    this.haplotypeReadMap = Collections.emptyMap();
    this.haplotypes = Collections.emptyList();
}
/**
 * Create a new calculator over a set of haplotypes and the per-sample read likelihoods
 * for each haplotype.
 *
 * @param haplotypes the haplotypes whose events may be examined for LD
 * @param haplotypeReadMap map from sample name -> per-read allele likelihoods for each haplotype
 */
public HaplotypeLDCalculator(final List<Haplotype> haplotypes, final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap) {
    this.haplotypes = haplotypes;
    this.haplotypeReadMap = haplotypeReadMap;
}
/**
 * Construct the cached list of summed haplotype likelihoods per sample if it
 * hasn't already been computed.  This data structure is lazily created but only
 * needs to be made once per merge decision, since the underlying data doesn't change
 * no matter how many calls to computeProbOfBeingPhased are made.
 */
private void buildHaplotypeLikelihoodsPerSampleIfNecessary() {
    if ( haplotypeLikelihoodsPerSample != null )
        return; // already computed; nothing to do

    // do the lazy computation
    haplotypeLikelihoodsPerSample = new LinkedList<Map<Haplotype, Double>>();
    for ( final String sample : haplotypeReadMap.keySet() ) {
        final Map<Haplotype, Double> likelihoodByHaplotype = new HashMap<Haplotype, Double>(haplotypes.size());
        for ( final Haplotype h : haplotypes ) {
            // count up the co-occurrences of the events for the R^2 calculation
            final double haplotypeLikelihood = LikelihoodCalculationEngine.computeDiploidHaplotypeLikelihoods(
                    sample, haplotypeReadMap, Collections.singletonList(Allele.create(h, true)), false)[0][0];
            likelihoodByHaplotype.put(h, haplotypeLikelihood);
        }
        haplotypeLikelihoodsPerSample.add(likelihoodByHaplotype);
    }
}
/**
 * Compute the likelihood based probability that the haplotypes for first and second are only x11 and x22
 *
 * As opposed to the hypothesis that all four haplotypes (x11, x12, x21, and x22) exist in the population
 *
 * @param first a non-null VariantContext
 * @param second a non-null VariantContext
 * @return the probability that only x11 and x22 exist among the samples
 */
protected double computeProbOfBeingPhased(final VariantContext first, final VariantContext second) {
    buildHaplotypeLikelihoodsPerSampleIfNecessary();

    Arrays.fill(table, Double.NEGATIVE_INFINITY);
    for ( final Map<Haplotype, Double> perSampleLikelihoods : haplotypeLikelihoodsPerSample ) {
        for ( final Map.Entry<Haplotype, Double> hAndLikelihood : perSampleLikelihoods.entrySet() ) {
            final Haplotype h = hAndLikelihood.getKey();

            // count up the co-occurrences of the events for the R^2 calculation
            final boolean hasFirstEvent = h.getEventMap().get(first.getStart()) != null;
            final boolean hasSecondEvent = h.getEventMap().get(second.getStart()) != null; // TODO -- add function to take a VC

            // fold the two indicator bits into an index in the linear contingency table
            final int index = (hasFirstEvent ? 2 : 0) + (hasSecondEvent ? 1 : 0);
            table[index] = MathUtils.approximateLog10SumLog10(table[index], hAndLikelihood.getValue());
        }
    }

    return pPhased(table);
}
/**
 * Compute probability that two variants are in phase with each other and that no
 * compound hets exist in the population.
 *
 * Implemented as a likelihood ratio test of the hypothesis:
 *
 *      x11 and x22 are the only haplotypes in the population
 *
 * vs.
 *
 *      all four haplotype combinations (x11, x12, x21, and x22) exist in the population.
 *
 * Since we must have both variants in the population, the x11 & x11 state is excluded, so
 * the probability of having just x11 and x22 is P(x11 & x22) + P(x22 & x22).
 *
 * Alternatively, we might have any configuration that gives us both alts 1 and 2, which are:
 *
 *      - P(x11 & x12 & x21) -- we have hom-ref and both hets
 *      - P(x22 & x12 & x21) -- we have hom-alt and both hets
 *      - P(x22 & x12)       -- one haplotype is 22 and the other is het 12
 *      - P(x22 & x21)       -- one haplotype is 22 and the other is het 21
 *
 * The probability is just p11_22 / (p11_22 + p hets)
 *
 * @param table linear contingency table with table[0] == [0][0], table[1] = [0][1], table[2] = [1][0], table[3] = [1][1];
 *              doesn't have to be normalized as this function does the normalization internally
 * @return the real space probability that the data is phased
 */
@Requires("table.length == 4")
protected double pPhased( final double[] table ) {
    final double[] normTable = MathUtils.normalizeFromLog10(table, true);
    final double x11 = normTable[0];
    final double x12 = normTable[1];
    final double x21 = normTable[2];
    final double x22 = normTable[3];

    // log10 probability that we see only x11 && x22
    final double p11_22 = MathUtils.approximateLog10SumLog10(x11 + x22, x22 + x22);

    // log10 probability of having any of the other haplotype configurations
    final double pOthers = MathUtils.approximateLog10SumLog10(new double[]{
            MathUtils.approximateLog10SumLog10(x11 + x12, x11 + x21, x12 + x21), // hom-ref plus both hets
            MathUtils.approximateLog10SumLog10(x22 + x12, x22 + x21, x12 + x21), // hom-alt plus both hets
            x22 + x12, // one haplotype is 22, the other is het 12
            x22 + x21  // one haplotype is 22, the other is het 21
    });

    // the probability of being phased is the ratio p11_22 / (p11_22 + pOthers), which in log space is a subtraction
    final double log10Phased = p11_22 - MathUtils.approximateLog10SumLog10(p11_22, pOthers);
    return Math.pow(10.0, log10Phased);
}
/**
 * Testing entry point: packs four log10 cell values into a linear contingency table
 * and delegates to pPhased.
 */
protected double pPhasedTest( final double x11, final double x12, final double x21, final double x22 ) {
    final double[] contingencyTable = { x11, x12, x21, x22 };
    return pPhased(contingencyTable);
}
}

View File

@ -0,0 +1,305 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import java.util.*;
/**
* Merges VariantContexts in a series of haplotypes according to their pairwise LD
*
* User: depristo
* Date: 3/28/13
* Time: 6:17 PM
*/
public class LDMerger extends MergeVariantsAcrossHaplotypes {
private final static Logger logger = Logger.getLogger(LDMerger.class);
private final boolean DEBUG;
private final int minSamplesToMergeSNPs;
private final int minSamplesToMergeOtherEvents;
public LDMerger(boolean DEBUG, int minSamplesToMergeSNPs, int minSamplesToMergeOtherEvents) {
super();
this.DEBUG = DEBUG;
this.minSamplesToMergeSNPs = minSamplesToMergeSNPs;
this.minSamplesToMergeOtherEvents = minSamplesToMergeOtherEvents;
}
protected LDMerger() {
this(false, 1, 1);
}
// TODO -- should be class arguments and static variables in HC
protected final static int MAX_DISTANCE_BETWEEN_SNPS_TO_MERGE = 6;
protected final static int MAX_DISTANCE_BETWEEN_OTHER_EVENTS_TO_MERGE = 25;
/**
* We require 99% confidence that only the phased haplotypes exist in the population to merge the records
*/
protected final static double MERGE_EVENTS_PROB_PHASED_THRESHOLD = 0.99;
/**
* Merge as many events among the haplotypes as possible based on pairwise LD among variants
*
* @param haplotypes a list of haplotypes whose events we want to merge
* @param haplotypeReadMap map from sample name -> read likelihoods for each haplotype
* @param startPosKeySet a set of starting positions of all events among the haplotypes
* @param ref the reference bases
* @param refLoc the span of the reference bases
*/
@Override
public boolean merge( final List<Haplotype> haplotypes,
final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
final TreeSet<Integer> startPosKeySet,
final byte[] ref,
final GenomeLoc refLoc ) {
if ( haplotypes == null ) throw new IllegalArgumentException("haplotypes cannot be null");
if ( haplotypeReadMap == null ) throw new IllegalArgumentException("haplotypeReadMap cannot be null");
if ( startPosKeySet == null ) throw new IllegalArgumentException("startPosKeySet cannot be null");
if ( ref == null ) throw new IllegalArgumentException("ref cannot be null");
if ( refLoc == null ) throw new IllegalArgumentException("refLoc cannot be null");
if ( refLoc.size() != ref.length ) throw new IllegalArgumentException("refLoc size " + refLoc.size() + " != ref.length " + ref.length + " at " + refLoc);
if( startPosKeySet.size() <= 1 ) { return false; }
final int nSamples = haplotypeReadMap.keySet().size();
final HaplotypeLDCalculator r2Calculator = new HaplotypeLDCalculator(haplotypes, haplotypeReadMap);
boolean somethingWasMerged = false;
boolean mapWasUpdated = true;
while( mapWasUpdated ) {
mapWasUpdated = mergeConsecutiveEventsBasedOnLDOnce(haplotypes, r2Calculator, nSamples, startPosKeySet, ref, refLoc);
somethingWasMerged |= mapWasUpdated;
}
return somethingWasMerged;
}
/**
* Merge the next pair of events, if possible
*
* @param haplotypes a list of haplotypes whose events we want to merge
* @param ldCalculator calculates R^2 for pairs of events on demand
* @param startPosKeySet a set of starting positions of all events among the haplotypes
* @param ref the reference bases
* @param refLoc the span of the reference bases
* @return true if something was merged, false otherwise
*/
protected boolean mergeConsecutiveEventsBasedOnLDOnce( final List<Haplotype> haplotypes,
final HaplotypeLDCalculator ldCalculator,
final int nSamples,
final TreeSet<Integer> startPosKeySet,
final byte[] ref,
final GenomeLoc refLoc ) {
// loop over the set of start locations and consider pairs that start near each other
final Iterator<Integer> iter = startPosKeySet.iterator();
int thisStart = iter.next();
while( iter.hasNext() ) {
final int nextStart = iter.next();
final LDMergeData toMerge = getPairOfEventsToMerge(haplotypes, thisStart, nextStart);
if ( toMerge.canBeMerged(nSamples) ) {
final double pPhased = ldCalculator.computeProbOfBeingPhased(toMerge.firstVC, toMerge.secondVC);
if( DEBUG ) {
logger.info("Found consecutive biallelic events with R^2 = " + String.format("%.4f", pPhased));
logger.info("-- " + toMerge.firstVC);
logger.info("-- " + toMerge.secondVC);
}
if( pPhased > MERGE_EVENTS_PROB_PHASED_THRESHOLD) {
final VariantContext mergedVC = createMergedVariantContext(toMerge.firstVC, toMerge.secondVC, ref, refLoc);
// if for some reason the merging resulting in a bad allele, mergedVC will be null, and we will just remove first and second
replaceVariantContextsInMap(haplotypes, startPosKeySet, mergedVC, toMerge.firstVC, toMerge.secondVC);
return true; // break out of tree set iteration since it was just updated, start over from the beginning and keep merging events
}
}
thisStart = nextStart;
}
return false;
}
/**
* Info about potential LD merge of two variant contexts
*/
private class LDMergeData {
VariantContext firstVC = null, secondVC = null;
boolean canBeMerged = true;
/** Tell this object that it cant be merged for some reason */
public LDMergeData cantBeMerged() {
canBeMerged = false;
return this;
}
/**
* Can these two events be merged
* @param nSamples the number of samples we're considering
* @return true if we can merge our two variant contexts
*/
public boolean canBeMerged(final int nSamples) {
if ( ! canBeMerged || firstVC == null || secondVC == null )
return false;
final int distance = secondVC.getStart() - firstVC.getEnd();
if ( firstVC.isSNP() && secondVC.isSNP() ) {
return nSamples >= minSamplesToMergeSNPs && distance <= MAX_DISTANCE_BETWEEN_SNPS_TO_MERGE;
} else {
return nSamples >= minSamplesToMergeOtherEvents && distance <= MAX_DISTANCE_BETWEEN_OTHER_EVENTS_TO_MERGE;
}
}
}
/**
* Get the information about the potential merge of two events starting at thisStart and nextStart
* @param haplotypes our haplotypes
* @param thisStart the starting position of the first event to merge
* @param nextStart the starting position of the next event to merge
* @return
*/
private LDMergeData getPairOfEventsToMerge(final List<Haplotype> haplotypes, final int thisStart, final int nextStart) {
final LDMergeData mergeData = new LDMergeData();
for( final Haplotype h : haplotypes ) {
// only make complex substitutions out of consecutive biallelic sites
final VariantContext thisHapVC = h.getEventMap().get(thisStart);
if( thisHapVC != null && !thisHapVC.isSymbolic() ) { // something was found at this location on this haplotype
if( mergeData.firstVC == null ) {
mergeData.firstVC = thisHapVC;
} else if( !thisHapVC.hasSameAllelesAs( mergeData.firstVC) ) {
return mergeData.cantBeMerged();
}
}
final VariantContext nextHapVC = h.getEventMap().get(nextStart);
if( nextHapVC != null && !nextHapVC.isSymbolic() ) { // something was found at the next location on this haplotype
if( mergeData.secondVC == null ) {
mergeData.secondVC = nextHapVC;
} else if( !nextHapVC.hasSameAllelesAs( mergeData.secondVC) ) {
return mergeData.cantBeMerged();
}
}
}
// don't try to merge overlapping events
if ( mergeData.firstVC != null && mergeData.secondVC != null && mergeData.firstVC.getEnd() >= mergeData.secondVC.getStart() )
return mergeData.cantBeMerged();
return mergeData;
}
// BUGBUG: make this merge function more general
protected VariantContext createMergedVariantContext( final VariantContext thisVC, final VariantContext nextVC, final byte[] ref, final GenomeLoc refLoc ) {
final int thisStart = thisVC.getStart();
final int nextStart = nextVC.getStart();
byte[] refBases = new byte[]{};
byte[] altBases = new byte[]{};
refBases = ArrayUtils.addAll(refBases, thisVC.getReference().getBases());
altBases = ArrayUtils.addAll(altBases, thisVC.getAlternateAllele(0).getBases());
int locus;
for( locus = thisStart + refBases.length; locus < nextStart; locus++ ) {
final byte refByte = ref[locus - refLoc.getStart()];
refBases = ArrayUtils.add(refBases, refByte);
altBases = ArrayUtils.add(altBases, refByte);
}
refBases = ArrayUtils.addAll(refBases, ArrayUtils.subarray(nextVC.getReference().getBases(), locus > nextStart ? 1 : 0, nextVC.getReference().getBases().length)); // special case of deletion including the padding base of consecutive indel
altBases = ArrayUtils.addAll(altBases, nextVC.getAlternateAllele(0).getBases());
int iii = 0;
if( refBases.length == altBases.length ) { // insertion + deletion of same length creates an MNP --> trim common prefix bases off the beginning of the allele
while( iii < refBases.length && refBases[iii] == altBases[iii] ) { iii++; }
if ( iii == refBases.length ) {
// we've become a null allele, such as with CA/C + A/AA -> CA/CA => after trimming there's nothing left
// so return a null variant context so we can eliminate the variants from consideration
return null;
}
}
final Allele refAllele = Allele.create( ArrayUtils.subarray(refBases, iii, refBases.length), true );
final Allele altAllele = Allele.create( ArrayUtils.subarray(altBases, iii, altBases.length), false );
return new VariantContextBuilder("merged", thisVC.getChr(), thisVC.getStart() + iii, nextVC.getEnd(), Arrays.asList(refAllele, altAllele)).make();
}
/**
* Update the event maps in all haplotypes to replace a replacement of update1 and 2 with replacement
*
* @param haplotypes the haplotypes whose event maps we need to update
* @param startPosKeySet a sorted set of start positions that we must update
* @param replacement a VariantContext to replace update1 and update2 with. Can be null, indicating that we just want to remove update1 and update2
* @param update1 the first VC we want to update
* @param update2 the second VC we want to update
*/
private void replaceVariantContextsInMap(final List<Haplotype> haplotypes,
final TreeSet<Integer> startPosKeySet,
final VariantContext replacement,
final VariantContext update1, final VariantContext update2) {
// remove the old event from the eventMap on every haplotype and the start pos key set, replace with merged event
for( final Haplotype h : haplotypes ) {
// if we had both events, add replacement. In some cases the haplotype may not have both
// events but they were still merged because the haplotype isn't a particularly informative
// haplotype in any case. The order of operations here is important because we are modifying the map
final boolean shouldAdd = h.getEventMap().containsKey(update1.getStart()) && h.getEventMap().containsKey(update2.getStart());
h.getEventMap().remove(update1.getStart());
h.getEventMap().remove(update2.getStart());
if ( shouldAdd && replacement != null ) {
h.getEventMap().addVC(replacement, false); // cannot merge we other events at the same position
}
}
startPosKeySet.remove(update1.getStart());
startPosKeySet.remove(update2.getStart());
if ( replacement != null ) startPosKeySet.add(replacement.getStart());
}
}

View File

@ -0,0 +1,79 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
 * LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as stated above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
/**
* Baseclass for code that wants to merge variants together in the haplotype caller
*
* This root class is basically a no-op, and can be used to not do any merging
*/
public class MergeVariantsAcrossHaplotypes {
    /**
     * Merge variants across the given haplotypes, updating each haplotype's event map and the
     * shared set of event start positions as appropriate.
     *
     * This base implementation performs no merging at all — it leaves every argument untouched
     * and reports that nothing changed — so it can be plugged in wherever cross-haplotype
     * merging should be disabled. Subclasses (e.g. an LD-based merger) override this method
     * to do real work.
     *
     * @param haplotypes a list of haplotypes whose events we want to merge
     * @param haplotypeReadMap map from sample name -> read likelihoods for each haplotype
     * @param startPosKeySet a set of starting positions of all events among the haplotypes
     * @param ref the reference bases
     * @param refLoc the span of the reference bases
     * @return true if anything was merged; always false for this no-op base class
     */
    public boolean merge( final List<Haplotype> haplotypes,
                          final Map<String, PerReadAlleleLikelihoodMap> haplotypeReadMap,
                          final TreeSet<Integer> startPosKeySet,
                          final byte[] ref,
                          final GenomeLoc refLoc ) {
        return false;
    }
}

View File

@ -57,7 +57,7 @@ import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.DeBruijnGraph;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;

View File

@ -56,6 +56,7 @@ import net.sf.picard.reference.ReferenceSequenceFile;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.*;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
@ -198,7 +199,8 @@ public class GenotypingEngineUnitTest extends BaseTest {
public Map<Integer,VariantContext> calcAlignment() {
final SWPairwiseAlignment alignment = new SWPairwiseAlignment(ref, hap);
return GenotypingEngine.generateVCsFromAlignment( new Haplotype(hap), alignment.getAlignmentStart2wrt1(), alignment.getCigar(), ref, hap, genomeLocParser.createGenomeLoc("4",1,1+ref.length), "name");
final Haplotype h = new Haplotype(hap, false, alignment.getAlignmentStart2wrt1(), alignment.getCigar());
return GenotypingEngine.generateVCsFromAlignment( h, ref, genomeLocParser.createGenomeLoc("4",1,1+ref.length), "name");
}
}
@ -277,148 +279,6 @@ public class GenotypingEngineUnitTest extends BaseTest {
Assert.assertTrue(compareVCMaps(calculatedMap, expectedMap));
}
/**
* Tests that we get the right values from the R^2 calculation
*/
@Test
public void testCalculateR2LD() {
logger.warn("Executing testCalculateR2LD");
// Symmetric counts => no association between the two sites: R^2 must be 0
Assert.assertEquals(GenotypingEngine.calculateR2LD(1,1,1,1), 0.0, 0.00001);
Assert.assertEquals(GenotypingEngine.calculateR2LD(100,100,100,100), 0.0, 0.00001);
// Only the two "phased" configurations observed => perfect LD: R^2 must be 1
Assert.assertEquals(GenotypingEngine.calculateR2LD(1,0,0,1), 1.0, 0.00001);
Assert.assertEquals(GenotypingEngine.calculateR2LD(100,0,0,100), 1.0, 0.00001);
// General case with counts 1,2,3,4 (total 10): expected value spelled out as
// (p11 - p1*p2)^2 / (p1*(1-p1)*p2*(1-p2)) with p11=0.1, p1=0.3, p2=0.4
// NOTE(review): argument order assumed to be the four haplotype-configuration counts — confirm against calculateR2LD
Assert.assertEquals(GenotypingEngine.calculateR2LD(1,2,3,4), (0.1 - 0.12) * (0.1 - 0.12) / (0.3 * 0.7 * 0.4 * 0.6), 0.00001);
}
@Test
public void testCreateMergedVariantContext() {
    logger.warn("Executing testCreateMergedVariantContext");
    final byte[] ref = "AATTCCGGAATTCCGGAATT".getBytes();
    final GenomeLoc refLoc = genomeLocParser.createGenomeLoc("2", 1700, 1700 + ref.length);

    // Each case below builds the two input events, the expected merged record, and checks
    // alleles/start/end via the shared helper (this replaces 12 copy-pasted assertion stanzas).

    // SNP + SNP = simple MNP
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
            new VariantContextBuilder().loc("2", 1704, 1704).alleles("C","G").make(),
            new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","GG").source("merged").make());

    // SNP + ref + SNP = MNP with ref base gap
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
            new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
            new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","GCG").source("merged").make());

    // insertion + SNP
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TAAAAA").make(),
            new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
            new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TAAAAACG").source("merged").make());

    // SNP + insertion
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
            new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","CAAAAA").make(),
            new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","GCCAAAAA").source("merged").make());

    // deletion + SNP
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","T").make(),
            new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
            new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TG").source("merged").make());

    // SNP + deletion
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
            new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
            new VariantContextBuilder().loc("2", 1703, 1706).alleles("TCCG","GCC").source("merged").make());

    // insertion + deletion = MNP
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TA").make(),
            new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
            new VariantContextBuilder().loc("2", 1704, 1706).alleles("CCG","ACC").source("merged").make());

    // insertion + deletion
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TAAAAA").make(),
            new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
            new VariantContextBuilder().loc("2", 1703, 1706).alleles("TCCG","TAAAAACC").source("merged").make());

    // insertion + insertion
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TA").make(),
            new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","CA").make(),
            new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TACCA").source("merged").make());

    // deletion + deletion
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","A").make(),
            new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
            new VariantContextBuilder().loc("2", 1701, 1706).alleles("ATTCCG","ATCC").source("merged").make());

    // deletion + insertion (abutting)
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","A").make(),
            new VariantContextBuilder().loc("2", 1702, 1702).alleles("T","GCGCGC").make(),
            new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","AGCGCGC").source("merged").make());

    // complex + complex
    assertMergedVC(ref, refLoc,
            new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","AAA").make(),
            new VariantContextBuilder().loc("2", 1706, 1707).alleles("GG","AC").make(),
            new VariantContextBuilder().loc("2", 1703, 1707).alleles("TCCGG","AAACAC").source("merged").make());
}

/**
 * Helper: merge thisVC and nextVC with GenotypingEngine.createMergedVariantContext and assert
 * that the result has the same alleles, start, and end as truthVC.
 */
private void assertMergedVC(final byte[] ref, final GenomeLoc refLoc,
                            final VariantContext thisVC, final VariantContext nextVC,
                            final VariantContext truthVC) {
    final VariantContext mergedVC = GenotypingEngine.createMergedVariantContext(thisVC, nextVC, ref, refLoc);
    logger.warn(truthVC + " == " + mergedVC);
    Assert.assertTrue(truthVC.hasSameAllelesAs(mergedVC));
    Assert.assertEquals(truthVC.getStart(), mergedVC.getStart());
    Assert.assertEquals(truthVC.getEnd(), mergedVC.getEnd());
}
/**
* Private function to compare Map of VCs, it only checks the types and start locations of the VariantContext
*/

View File

@ -64,7 +64,7 @@ public class HaplotypeCallerComplexAndSymbolicVariantsIntegrationTest extends Wa
@Test
public void testHaplotypeCallerMultiSampleComplex() {
HCTestComplexVariants(privateTestDir + "AFR.complex.variants.bam", "", "7b67ac6213b7a6f759057fb9d7148fdc");
HCTestComplexVariants(privateTestDir + "AFR.complex.variants.bam", "", "80b9280b1e65952f60ba2fd738d4840f");
}
private void HCTestSymbolicVariants(String bam, String args, String md5) {
@ -88,12 +88,12 @@ public class HaplotypeCallerComplexAndSymbolicVariantsIntegrationTest extends Wa
@Test
public void testHaplotypeCallerMultiSampleGGAComplex() {
HCTestComplexGGA(NA12878_CHR20_BAM, "-L 20:119673-119823 -L 20:121408-121538",
"eb41ed6f1d692368a0f67311d139a38a");
"125e93deeb3b390a14d9b777aa2a220f");
}
@Test
public void testHaplotypeCallerMultiSampleGGAMultiAllelic() {
HCTestComplexGGA(NA12878_CHR20_BAM, "-L 20:133041-133161 -L 20:300207-300337",
"c4c33c962aca12c51def9b8cde35b7d2");
"6957fd0e8a5bc66d2572a6ca8626fa7a");
}
}

View File

@ -47,12 +47,15 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
import net.sf.picard.reference.IndexedFastaSequenceFile;
import org.broad.tribble.TribbleIndexedFeatureReader;
import org.broadinstitute.sting.WalkerTest;
import org.broadinstitute.sting.gatk.GenomeAnalysisEngine;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.collections.Pair;
import org.broadinstitute.sting.utils.variant.GATKVCFUtils;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.vcf.VCFCodec;
import org.testng.annotations.Test;
import java.io.File;
@ -77,12 +80,12 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerMultiSample() {
HCTest(CEUTRIO_BAM, "", "f132843e3c8e065a783cc4fdf9ee5df3");
HCTest(CEUTRIO_BAM, "", "6fa37c449a800bcd59069be03ad2fff2");
}
@Test
public void testHaplotypeCallerSingleSample() {
HCTest(NA12878_BAM, "", "15e0201f5c478310d278d2d03483c152");
HCTest(NA12878_BAM, "", "6140447b34bd1d08b3ed4d473d2c2f23");
}
@Test(enabled = false) // can't annotate the rsID's yet
@ -93,7 +96,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerMultiSampleGGA() {
HCTest(CEUTRIO_BAM, "--max_alternate_alleles 3 -gt_mode GENOTYPE_GIVEN_ALLELES -out_mode EMIT_ALL_SITES -alleles " + validationDataLocation + "combined.phase1.chr20.raw.indels.sites.vcf",
"48d309aed0cdc40cc983eeb5a8d12f53");
"cbd119f3d37a9af0b3539c13b8053bd9");
}
@Test
@ -109,7 +112,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerSingleSampleIndelQualityScores() {
HCTestIndelQualityScores(NA12878_RECALIBRATED_BAM, "", "34c7fcfe17a1d835e2dc403df9eb3591");
HCTestIndelQualityScores(NA12878_RECALIBRATED_BAM, "", "9eeeada2f7145adfe08f538aad704982");
}
private void HCTestNearbySmallIntervals(String bam, String args, String md5) {
@ -146,7 +149,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void testHaplotypeCallerNearbySmallIntervals() {
HCTestNearbySmallIntervals(NA12878_BAM, "", "eae65d20836d6c6ebca9e25e33566f74");
HCTestNearbySmallIntervals(NA12878_BAM, "", "16ecd2f282bcb10dc32e7f3fe714a000");
}
// This problem bam came from a user on the forum and it spotted a problem where the ReadClipper
@ -156,14 +159,14 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
@Test
public void HCTestProblematicReadsModifiedInActiveRegions() {
final String base = String.format("-T HaplotypeCaller -R %s -I %s", REF, privateTestDir + "haplotype-problem-4.bam") + " --no_cmdline_in_header -o %s -minPruning 3 -L 4:49139026-49139965";
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("a3d74040a4966bf7a04cbd4924970685"));
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("0689d2c202849fd05617648eaf429b9a"));
executeTest("HCTestProblematicReadsModifiedInActiveRegions: ", spec);
}
@Test
public void HCTestStructuralIndels() {
final String base = String.format("-T HaplotypeCaller -R %s -I %s", REF, privateTestDir + "AFR.structural.indels.bam") + " --no_cmdline_in_header -o %s -minPruning 6 -L 20:8187565-8187800 -L 20:18670537-18670730";
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("40da88ed3722c512264b72db37f18720"));
final WalkerTestSpec spec = new WalkerTestSpec(base, Arrays.asList("e8466846ca420bcbcd52b97f7a661aa3"));
executeTest("HCTestStructuralIndels: ", spec);
}
@ -185,7 +188,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
public void HCTestReducedBam() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "bamExample.ReducedRead.ADAnnotation.bam -o %s -L 1:67,225,396-67,288,518", 1,
Arrays.asList("69b83d578c14ed32d08ce4e7ff8a8a18"));
Arrays.asList("e30b974b038293841e6be23c93ce76e1"));
executeTest("HC calling on a ReducedRead BAM", spec);
}
@ -193,7 +196,7 @@ public class HaplotypeCallerIntegrationTest extends WalkerTest {
public void testReducedBamWithReadsNotFullySpanningDeletion() {
WalkerTest.WalkerTestSpec spec = new WalkerTest.WalkerTestSpec(
"-T HaplotypeCaller -R " + b37KGReference + " --no_cmdline_in_header -I " + privateTestDir + "reduced.readNotFullySpanningDeletion.bam -o %s -L 1:167871297", 1,
Arrays.asList("0cae60d86a3f86854699217a30ece3e3"));
Arrays.asList("a913849c7ebdefb23ef9fa5ec05960fd"));
executeTest("test calling on a ReducedRead BAM where the reads do not fully span a deletion", spec);
}
}

View File

@ -50,38 +50,35 @@ import org.broadinstitute.sting.BaseTest;
import org.testng.Assert;
import org.testng.annotations.Test;
public class KMerErrorCorrectorUnitTest extends BaseTest {
public class KMerCounterUnitTest extends BaseTest {
@Test
public void testMyData() {
final KMerErrorCorrector corrector = new KMerErrorCorrector(3, 1, 2, 2);
final KMerCounter counter = new KMerCounter(3);
Assert.assertNotNull(corrector.toString());
Assert.assertNotNull(counter.toString());
corrector.addKmers(
counter.addKmers(
"ATG", "ATG", "ATG", "ATG",
"ACC", "ACC", "ACC",
"AAA", "AAA",
"CTG", // -> ATG
"NNA", // -> AAA
"CCC", // => ACC
"NNN", // => null
"NNC" // => ACC [because of min count won't go to NNA]
"CTG",
"NNA",
"CCC"
);
testCorrection(corrector, "ATG", "ATG");
testCorrection(corrector, "ACC", "ACC");
testCorrection(corrector, "AAA", "AAA");
testCorrection(corrector, "CTG", "ATG");
testCorrection(corrector, "NNA", "AAA");
testCorrection(corrector, "CCC", "ACC");
testCorrection(corrector, "NNN", null);
testCorrection(corrector, "NNC", "ACC");
testCounting(counter, "ATG", 4);
testCounting(counter, "ACC", 3);
testCounting(counter, "AAA", 2);
testCounting(counter, "CTG", 1);
testCounting(counter, "NNA", 1);
testCounting(counter, "CCC", 1);
testCounting(counter, "NNN", 0);
testCounting(counter, "NNC", 0);
Assert.assertNotNull(corrector.toString());
Assert.assertNotNull(counter.toString());
}
private void testCorrection(final KMerErrorCorrector corrector, final String in, final String out) {
Assert.assertEquals(corrector.getErrorCorrectedKmer(in), out);
Assert.assertEquals(corrector.getErrorCorrectedKmer(in.getBytes()), out == null ? null : out.getBytes());
private void testCounting(final KMerCounter counter, final String in, final int expectedCount) {
Assert.assertEquals(counter.getKmerCount(in.getBytes()), expectedCount);
}
}

View File

@ -53,14 +53,10 @@ package org.broadinstitute.sting.gatk.walkers.haplotypecaller;
*/
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.MathUtils;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.*;
/**
* Unit tests for LikelihoodCalculationEngine
*/

View File

@ -241,9 +241,11 @@ public class BaseGraphUnitTest extends BaseTest {
graph.printGraph(tmp, 10);
}
private void assertVertexSetEquals(final Set<SeqVertex> actual, final SeqVertex ... expected) {
private void assertVertexSetEquals(final Collection<SeqVertex> actual, final SeqVertex ... expected) {
final Set<SeqVertex> actualSet = new HashSet<SeqVertex>(actual);
Assert.assertEquals(actualSet.size(), actual.size(), "Duplicate elements found in vertex list");
final Set<SeqVertex> expectedSet = expected == null ? Collections.<SeqVertex>emptySet() : new HashSet<SeqVertex>(Arrays.asList(expected));
Assert.assertEquals(actual, expectedSet);
Assert.assertEquals(actualSet, expectedSet);
}
@Test(enabled = true)

View File

@ -55,10 +55,7 @@ import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.*;
/**
* Created with IntelliJ IDEA.
@ -70,15 +67,13 @@ public class KBestPathsUnitTest {
@DataProvider(name = "BasicPathFindingData")
public Object[][] makeBasicPathFindingData() {
List<Object[]> tests = new ArrayList<Object[]>();
// for ( final int nStartNodes : Arrays.asList(1) ) {
// for ( final int nBranchesPerBubble : Arrays.asList(2) ) {
// for ( final int nEndNodes : Arrays.asList(1) ) {
// for ( final boolean addCycle : Arrays.asList(true) ) {
for ( final int nStartNodes : Arrays.asList(1, 2, 3) ) {
for ( final int nBranchesPerBubble : Arrays.asList(2, 3) ) {
for ( final int nEndNodes : Arrays.asList(1, 2, 3) ) {
for ( final boolean addCycle : Arrays.asList(true, false) ) {
tests.add(new Object[]{nStartNodes, nBranchesPerBubble, nEndNodes, addCycle});
for ( final boolean allowCycles : Arrays.asList(false, true)) {
for ( final int nStartNodes : Arrays.asList(1, 2, 3) ) {
for ( final int nBranchesPerBubble : Arrays.asList(2, 3) ) {
for ( final int nEndNodes : Arrays.asList(1, 2, 3) ) {
for ( final boolean addCycle : Arrays.asList(true, false) ) {
tests.add(new Object[]{nStartNodes, nBranchesPerBubble, nEndNodes, addCycle, allowCycles});
}
}
}
}
@ -88,9 +83,9 @@ public class KBestPathsUnitTest {
}
private static int weight = 1;
final List<SeqVertex> createVertices(final SeqGraph graph, final int n, final SeqVertex source, final SeqVertex target) {
final Set<SeqVertex> createVertices(final SeqGraph graph, final int n, final SeqVertex source, final SeqVertex target) {
final List<String> seqs = Arrays.asList("A", "C", "G", "T");
final List<SeqVertex> vertices = new LinkedList<SeqVertex>();
final Set<SeqVertex> vertices = new LinkedHashSet<SeqVertex>();
for ( int i = 0; i < n; i++ ) {
final SeqVertex v = new SeqVertex(seqs.get(i));
graph.addVertex(v);
@ -102,22 +97,22 @@ public class KBestPathsUnitTest {
}
@Test(dataProvider = "BasicPathFindingData", enabled = true)
public void testBasicPathFinding(final int nStartNodes, final int nBranchesPerBubble, final int nEndNodes, final boolean addCycle) {
public void testBasicPathFinding(final int nStartNodes, final int nBranchesPerBubble, final int nEndNodes, final boolean addCycle, final boolean allowCycles) {
SeqGraph graph = new SeqGraph();
final SeqVertex middleTop = new SeqVertex("GTAC");
final SeqVertex middleBottom = new SeqVertex("ACTG");
graph.addVertices(middleTop, middleBottom);
final List<SeqVertex> starts = createVertices(graph, nStartNodes, null, middleTop);
final List<SeqVertex> bubbles = createVertices(graph, nBranchesPerBubble, middleTop, middleBottom);
final List<SeqVertex> ends = createVertices(graph, nEndNodes, middleBottom, null);
final Set<SeqVertex> starts = createVertices(graph, nStartNodes, null, middleTop);
final Set<SeqVertex> bubbles = createVertices(graph, nBranchesPerBubble, middleTop, middleBottom);
final Set<SeqVertex> ends = createVertices(graph, nEndNodes, middleBottom, null);
if ( addCycle ) graph.addEdge(middleBottom, middleBottom);
// enumerate all possible paths
final List<Path<SeqVertex>> paths = new KBestPaths<SeqVertex>().getKBestPaths(graph);
final List<Path<SeqVertex>> paths = new KBestPaths<SeqVertex>(allowCycles).getKBestPaths(graph, starts, ends);
final int expectedNumOfPaths = nStartNodes * nBranchesPerBubble * (addCycle ? 2 : 1) * nEndNodes;
final int expectedNumOfPaths = nStartNodes * nBranchesPerBubble * (addCycle && allowCycles ? 2 : 1) * nEndNodes;
Assert.assertEquals(paths.size(), expectedNumOfPaths, "Didn't find the expected number of paths");
int lastScore = Integer.MAX_VALUE;
@ -128,11 +123,47 @@ public class KBestPathsUnitTest {
// get the best path, and make sure it's the same as our optimal path overall
final Path best = paths.get(0);
final List<Path<SeqVertex>> justOne = new KBestPaths<SeqVertex>().getKBestPaths(graph, 1);
final List<Path<SeqVertex>> justOne = new KBestPaths<SeqVertex>(allowCycles).getKBestPaths(graph, 1, starts, ends);
Assert.assertEquals(justOne.size(), 1);
Assert.assertTrue(justOne.get(0).pathsAreTheSame(best), "Best path from complete enumerate " + best + " not the same as from k = 1 search " + justOne.get(0));
}
@Test
public void testPathFindingComplexCycle() {
SeqGraph graph = new SeqGraph();
final SeqVertex v1 = new SeqVertex("A");
final SeqVertex v2 = new SeqVertex("C");
final SeqVertex v3 = new SeqVertex("G");
final SeqVertex v4 = new SeqVertex("T");
final SeqVertex v5 = new SeqVertex("AA");
graph.addVertices(v1, v2, v3, v4, v5);
graph.addEdges(v1, v2, v3, v4, v5);
graph.addEdges(v3, v3);
graph.addEdges(v4, v2);
// enumerate all possible paths
final List<Path<SeqVertex>> paths = new KBestPaths<SeqVertex>(false).getKBestPaths(graph, v1, v5);
Assert.assertEquals(paths.size(), 1, "Didn't find the expected number of paths");
}
@Test
public void testPathFindingCycleLastNode() {
SeqGraph graph = new SeqGraph();
final SeqVertex v1 = new SeqVertex("A");
final SeqVertex v2 = new SeqVertex("C");
final SeqVertex v3 = new SeqVertex("G");
graph.addVertices(v1, v2, v3);
graph.addEdges(v1, v2, v3, v3);
// enumerate all possible paths
final List<Path<SeqVertex>> paths = new KBestPaths<SeqVertex>(false).getKBestPaths(graph, v1, v3);
Assert.assertEquals(paths.size(), 1, "Didn't find the expected number of paths");
}
@DataProvider(name = "BasicBubbleDataProvider")
public Object[][] makeBasicBubbleDataProvider() {
List<Object[]> tests = new ArrayList<Object[]>();

View File

@ -47,15 +47,20 @@
package org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.Utils;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
public class SeqGraphUnitTest extends BaseTest {
private final static boolean DEBUG = false;
private class MergeNodesWithNoVariationTestProvider extends TestDataProvider {
public byte[] sequence;
public int KMER_LENGTH;
@ -98,7 +103,7 @@ public class SeqGraphUnitTest extends BaseTest {
return MergeNodesWithNoVariationTestProvider.getTests(MergeNodesWithNoVariationTestProvider.class);
}
@Test(dataProvider = "MergeNodesWithNoVariationTestProvider", enabled = true)
@Test(dataProvider = "MergeNodesWithNoVariationTestProvider", enabled = !DEBUG)
public void testMergeNodesWithNoVariation(MergeNodesWithNoVariationTestProvider cfg) {
logger.warn(String.format("Test: %s", cfg.toString()));
@ -178,7 +183,7 @@ public class SeqGraphUnitTest extends BaseTest {
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "IsDiamondData", enabled = true)
@Test(dataProvider = "IsDiamondData", enabled = !DEBUG)
public void testIsDiamond(final SeqGraph graph, final SeqVertex v, final boolean isRootOfDiamond) {
final SeqGraph.MergeDiamonds merger = graph.new MergeDiamonds();
merger.setDontModifyGraphEvenIfPossible();
@ -191,8 +196,8 @@ public class SeqGraphUnitTest extends BaseTest {
final SeqGraph graph = new SeqGraph();
SeqVertex pre1 = new SeqVertex("ACT");
SeqVertex pre2 = new SeqVertex("AGT");
SeqVertex pre1 = new SeqVertex(Utils.dupString("A", SeqGraph.MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES) + "CT");
SeqVertex pre2 = new SeqVertex(Utils.dupString("A", SeqGraph.MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES) + "GT");
SeqVertex top = new SeqVertex("A");
SeqVertex middle1 = new SeqVertex("GC");
SeqVertex middle2 = new SeqVertex("TC");
@ -278,7 +283,7 @@ public class SeqGraphUnitTest extends BaseTest {
final SeqVertex newMiddle1 = new SeqVertex("G");
final SeqVertex newMiddle2 = new SeqVertex("T");
final SeqVertex newBottom = new SeqVertex("C" + bottom.getSequenceString());
final SeqVertex newTop = new SeqVertex("A");
final SeqVertex newTop = new SeqVertex(Utils.dupString("A", SeqGraph.MIN_COMMON_SEQUENCE_TO_MERGE_SOURCE_SINK_VERTICES));
final SeqVertex newTopDown1 = new SeqVertex("G");
final SeqVertex newTopDown2 = new SeqVertex("C");
final SeqVertex newTopBottomMerged = new SeqVertex("TA");
@ -311,7 +316,7 @@ public class SeqGraphUnitTest extends BaseTest {
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "MergingData", enabled = true)
@Test(dataProvider = "MergingData", enabled = !DEBUG)
public void testMerging(final SeqGraph graph, final SeqGraph expected) {
final SeqGraph merged = (SeqGraph)graph.clone();
merged.simplifyGraph(1);
@ -333,7 +338,7 @@ public class SeqGraphUnitTest extends BaseTest {
//
// Should become A -> ACT -> C [ref and non-ref edges]
//
@Test
@Test(enabled = !DEBUG)
public void testBubbleSameBasesWithRef() {
final SeqGraph graph = new SeqGraph();
final SeqVertex top = new SeqVertex("A");
@ -351,4 +356,169 @@ public class SeqGraphUnitTest extends BaseTest {
actual.simplifyGraph();
Assert.assertTrue(BaseGraph.graphEquals(actual, expected), "Wrong merging result after complete merging");
}
@DataProvider(name = "LinearZipData")
public Object[][] makeLinearZipData() throws Exception {
List<Object[]> tests = new ArrayList<Object[]>();
SeqGraph graph = new SeqGraph();
SeqGraph expected = new SeqGraph();
// empty graph => empty graph
tests.add(new Object[]{graph.clone(), expected.clone()});
SeqVertex a1 = new SeqVertex("A");
SeqVertex c1 = new SeqVertex("C");
SeqVertex ac1 = new SeqVertex("AC");
// just a single vertex
graph.addVertices(a1, c1);
expected.addVertices(a1, c1);
tests.add(new Object[]{graph.clone(), expected.clone()});
graph.addEdges(a1, c1);
expected = new SeqGraph();
expected.addVertices(ac1);
tests.add(new Object[]{graph.clone(), expected.clone()});
// three long chain merged corrected
SeqVertex g1 = new SeqVertex("G");
graph.addVertices(g1);
graph.addEdges(c1, g1);
expected = new SeqGraph();
expected.addVertex(new SeqVertex("ACG"));
tests.add(new Object[]{graph.clone(), expected.clone()});
// adding something that isn't connected isn't a problem
SeqVertex t1 = new SeqVertex("T");
graph.addVertices(t1);
expected = new SeqGraph();
expected.addVertices(new SeqVertex("ACG"), new SeqVertex("T"));
tests.add(new Object[]{graph.clone(), expected.clone()});
// splitting chain with branch produces the correct zipped subgraphs
final SeqVertex a2 = new SeqVertex("A");
final SeqVertex c2 = new SeqVertex("C");
graph = new SeqGraph();
graph.addVertices(a1, c1, g1, t1, a2, c2);
graph.addEdges(a1, c1, g1, t1, a2);
graph.addEdges(g1, c2);
expected = new SeqGraph();
SeqVertex acg = new SeqVertex("ACG");
SeqVertex ta = new SeqVertex("TA");
expected.addVertices(acg, ta, c2);
expected.addEdges(acg, ta);
expected.addEdges(acg, c2);
tests.add(new Object[]{graph.clone(), expected.clone()});
// Can merge chains with loops in them
{
graph = new SeqGraph();
graph.addVertices(a1, c1, g1);
graph.addEdges(a1, c1, g1);
graph.addEdges(a1, a1);
expected = new SeqGraph();
SeqVertex ac = new SeqVertex("AC");
SeqVertex cg = new SeqVertex("CG");
expected.addVertices(a1, cg);
expected.addEdges(a1, cg);
expected.addEdges(a1, a1);
tests.add(new Object[]{graph.clone(), expected.clone()});
graph.removeEdge(a1, a1);
graph.addEdges(c1, c1);
tests.add(new Object[]{graph.clone(), graph.clone()});
graph.removeEdge(c1, c1);
graph.addEdges(g1, g1);
expected = new SeqGraph();
expected.addVertices(ac, g1);
expected.addEdges(ac, g1, g1);
tests.add(new Object[]{graph.clone(), expected.clone()});
}
// check building n element long chains
{
final List<String> bases = Arrays.asList("A", "C", "G", "T", "TT", "GG", "CC", "AA");
for ( final int len : Arrays.asList(1, 2, 10, 100, 1000)) {
graph = new SeqGraph();
expected = new SeqGraph();
SeqVertex last = null;
String expectedBases = "";
for ( int i = 0; i < len; i++ ) {
final String seq = bases.get(i % bases.size());
expectedBases += seq;
SeqVertex a = new SeqVertex(seq);
graph.addVertex(a);
if ( last != null ) graph.addEdge(last, a);
last = a;
}
expected.addVertex(new SeqVertex(expectedBases));
tests.add(new Object[]{graph.clone(), expected.clone()});
}
}
// check that edge connections are properly maintained
{
int edgeWeight = 1;
for ( final int nIncoming : Arrays.asList(0, 2, 5, 10) ) {
for ( final int nOutgoing : Arrays.asList(0, 2, 5, 10) ) {
graph = new SeqGraph();
expected = new SeqGraph();
graph.addVertices(a1, c1, g1);
graph.addEdges(a1, c1, g1);
expected.addVertex(acg);
for ( final SeqVertex v : makeVertices(nIncoming) ) {
final BaseEdge e = new BaseEdge(false, edgeWeight++);
graph.addVertices(v);
graph.addEdge(v, a1, e);
expected.addVertex(v);
expected.addEdge(v, acg, e);
}
for ( final SeqVertex v : makeVertices(nOutgoing) ) {
final BaseEdge e = new BaseEdge(false, edgeWeight++);
graph.addVertices(v);
graph.addEdge(g1, v, e);
expected.addVertex(v);
expected.addEdge(acg, v, e);
}
tests.add(new Object[]{graph, expected});
}
}
}
return tests.toArray(new Object[][]{});
}
private List<SeqVertex> makeVertices(final int n) {
final List<SeqVertex> vs = new LinkedList<SeqVertex>();
final List<String> bases = Arrays.asList("A", "C", "G", "T", "TT", "GG", "CC", "AA");
for ( int i = 0; i < n; i++ )
vs.add(new SeqVertex(bases.get(i % bases.size())));
return vs;
}
@Test(dataProvider = "LinearZipData", enabled = true)
public void testLinearZip(final SeqGraph graph, final SeqGraph expected) {
final SeqGraph merged = (SeqGraph)graph.clone();
merged.zipLinearChains();
try {
Assert.assertTrue(SeqGraph.graphEquals(merged, expected));
} catch (AssertionError e) {
if ( ! SeqGraph.graphEquals(merged, expected) ) {
graph.printGraph(new File("graph.dot"), 0);
merged.printGraph(new File("merged.dot"), 0);
expected.printGraph(new File("expected.dot"), 0);
}
throw e;
}
}
}

View File

@ -98,10 +98,10 @@ public class SharedVertexSequenceSplitterUnitTest extends BaseTest {
min = Math.min(min, s.length());
}
final int actualPrefixLen = org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.Utils.compPrefixLen(bytes, min);
final int actualPrefixLen = GraphUtils.compPrefixLen(bytes, min);
Assert.assertEquals(actualPrefixLen, expectedPrefixLen, "Failed prefix test");
final int actualSuffixLen = org.broadinstitute.sting.gatk.walkers.haplotypecaller.graphs.Utils.compSuffixLen(bytes, min - actualPrefixLen);
final int actualSuffixLen = GraphUtils.compSuffixLen(bytes, min - actualPrefixLen);
Assert.assertEquals(actualSuffixLen, expectedSuffixLen, "Failed suffix test");
}
@ -250,4 +250,45 @@ public class SharedVertexSequenceSplitterUnitTest extends BaseTest {
}
}
}
@DataProvider(name = "MeetsMinSequenceData")
public Object[][] makeMeetsMinSequenceData() {
List<Object[]> tests = new ArrayList<Object[]>();
final boolean prefixBiased = SharedVertexSequenceSplitter.prefersPrefixMerging();
tests.add(new Object[]{Arrays.asList("AC", "AC"), 0, true, true});
tests.add(new Object[]{Arrays.asList("AC", "AC"), 1, prefixBiased, ! prefixBiased});
tests.add(new Object[]{Arrays.asList("AC", "AC"), 2, prefixBiased, ! prefixBiased});
tests.add(new Object[]{Arrays.asList("AC", "AC"), 3, false, false});
tests.add(new Object[]{Arrays.asList("A", "AC"), 1, true, false});
tests.add(new Object[]{Arrays.asList("A", "AC"), 2, false, false});
tests.add(new Object[]{Arrays.asList("AT", "AC"), 1, true, false});
tests.add(new Object[]{Arrays.asList("AAT", "AAC"), 1, true, false});
tests.add(new Object[]{Arrays.asList("AAT", "AAC"), 2, true, false});
tests.add(new Object[]{Arrays.asList("AAT", "AAC"), 3, false, false});
tests.add(new Object[]{Arrays.asList("AATCCC", "AACCCC"), 1, true, true});
tests.add(new Object[]{Arrays.asList("AATCCC", "AACCCC"), 2, true, true});
tests.add(new Object[]{Arrays.asList("AATCCC", "AACCCC"), 3, false, true});
tests.add(new Object[]{Arrays.asList("AATCCC", "AACCCC"), 4, false, false});
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "MeetsMinSequenceData")
public void testSplitterCompleteCycle(final List<String> mids, final int minSeqLength, final boolean prefixMeets, final boolean suffixMeets) {
final SeqGraph graph = new SeqGraph();
final SeqVertex top = new SeqVertex("AAAAAAAA");
final SeqVertex bot = new SeqVertex("GGGGGGGG");
final List<SeqVertex> v = new ArrayList<SeqVertex>();
for ( final String s : mids ) { v.add(new SeqVertex(s)); }
graph.addVertices(v.toArray(new SeqVertex[]{}));
graph.addVertices(top, bot);
for ( final SeqVertex vi : v ) { graph.addEdge(top, vi); graph.addEdge(vi, bot); }
final SharedVertexSequenceSplitter splitter = new SharedVertexSequenceSplitter(graph, v);
Assert.assertEquals(splitter.meetsMinMergableSequenceForPrefix(minSeqLength), prefixMeets, "Prefix failed");
Assert.assertEquals(splitter.meetsMinMergableSequenceForSuffix(minSeqLength), suffixMeets, "Suffix failed");
Assert.assertEquals(splitter.meetsMinMergableSequenceForEitherPrefixOrSuffix(minSeqLength), suffixMeets || prefixMeets, "Either prefix or suffix failed");
}
}

View File

@ -46,6 +46,8 @@
package org.broadinstitute.sting.utils.genotyper;
import net.sf.samtools.*;
import org.apache.commons.lang.ArrayUtils;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.sting.utils.BaseUtils;
@ -54,33 +56,16 @@ import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileupImpl;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.sting.utils.Utils;
import java.util.Map;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
import net.sf.picard.reference.IndexedFastaSequenceFile;
import net.sf.samtools.SAMFileHeader;
import net.sf.samtools.SAMFileReader;
import net.sf.samtools.SAMRecord;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.GenomeLocParser;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.activeregion.ActiveRegion;
import org.broadinstitute.sting.utils.activeregion.ActivityProfileState;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.sting.utils.pileup.PileupElement;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileup;
import org.broadinstitute.sting.utils.pileup.ReadBackedPileupImpl;
import org.broadinstitute.sting.utils.sam.ArtificialBAMBuilder;
import org.broadinstitute.sting.utils.sam.ArtificialSAMUtils;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;
import org.broadinstitute.sting.utils.sam.GATKSamRecordFactory;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import org.broadinstitute.variant.vcf.VCFCodec;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.*;
@ -235,7 +220,82 @@ public class PerReadAlleleLikelihoodMapUnitTest extends BaseTest {
Assert.assertEquals(downsampledStrat.get(base_A).size(),(int) (pileup.depthOfCoverage()/2) - 1);
Assert.assertEquals(downsampledStrat.get(base_C).size(),(int) (pileup.depthOfCoverage()/2));
Assert.assertEquals(downsampledStrat.get(base_T).size(),0);
}
@DataProvider(name = "PoorlyModelledReadData")
public Object[][] makePoorlyModelledReadData() {
List<Object[]> tests = new ArrayList<Object[]>();
// this functionality can be adapted to provide input data for whatever you might want in your data
tests.add(new Object[]{10, 0.1, false, Arrays.asList(0.0)});
tests.add(new Object[]{10, 0.1, true, Arrays.asList(-10.0)});
tests.add(new Object[]{10, 0.1, false, Arrays.asList(0.0, -10.0)});
tests.add(new Object[]{10, 0.1, true, Arrays.asList(-5.0, -10.0)});
tests.add(new Object[]{100, 0.1, false, Arrays.asList(-5.0, -10.0)});
tests.add(new Object[]{100, 0.01, true, Arrays.asList(-5.0, -10.0)});
tests.add(new Object[]{100, 0.01, false, Arrays.asList(-5.0, -10.0, -3.0)});
tests.add(new Object[]{100, 0.01, false, Arrays.asList(-5.0, -10.0, -2.0)});
tests.add(new Object[]{100, 0.01, true, Arrays.asList(-5.0, -10.0, -4.0)});
tests.add(new Object[]{100, 0.001, true, Arrays.asList(-5.0, -10.0)});
tests.add(new Object[]{100, 0.001, false, Arrays.asList(-5.0, -10.0, 0.0)});
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "PoorlyModelledReadData")
public void testPoorlyModelledRead(final int readLen, final double maxErrorRatePerBase, final boolean expected, final List<Double> log10likelihoods) {
final byte[] bases = Utils.dupBytes((byte)'A', readLen);
final byte[] quals = Utils.dupBytes((byte) 30, readLen);
final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(bases, quals, readLen + "M");
final PerReadAlleleLikelihoodMap map = new PerReadAlleleLikelihoodMap();
final boolean actual = map.readIsPoorlyModelled(read, log10likelihoods, maxErrorRatePerBase);
Assert.assertEquals(actual, expected);
}
@DataProvider(name = "RemovingPoorlyModelledReadData")
public Object[][] makeRemovingPoorlyModelledReadData() {
List<Object[]> tests = new ArrayList<Object[]>();
// this functionality can be adapted to provide input data for whatever you might want in your data
final int readLen = 10;
for ( int nReads = 0; nReads < 4; nReads++ ) {
for ( int nBad = 0; nBad <= nReads; nBad++ ) {
final int nGood = nReads - nBad;
tests.add(new Object[]{readLen, nReads, nBad, nGood});
}
}
return tests.toArray(new Object[][]{});
}
@Test(dataProvider = "RemovingPoorlyModelledReadData")
public void testRemovingPoorlyModelledReads(final int readLen, final int nReads, final int nBad, final int nGood) {
final PerReadAlleleLikelihoodMap map = new PerReadAlleleLikelihoodMap();
final Set<GATKSAMRecord> goodReads = new HashSet<GATKSAMRecord>();
final Set<GATKSAMRecord> badReads = new HashSet<GATKSAMRecord>();
for ( int readI = 0; readI < nReads; readI++ ) {
final boolean bad = readI < nBad;
final double likelihood = bad ? -100.0 : 0.0;
final byte[] bases = Utils.dupBytes((byte)'A', readLen);
final byte[] quals = Utils.dupBytes((byte) 30, readLen);
final Allele allele = Allele.create(Utils.dupString("A", readI+1));
final GATKSAMRecord read = ArtificialSAMUtils.createArtificialRead(bases, quals, readLen + "M");
read.setReadName("readName" + readI);
map.add(read, allele, likelihood);
(bad ? badReads : goodReads).add(read);
}
final List<GATKSAMRecord> removedReads = map.filterPoorlyModelledReads(0.01);
Assert.assertEquals(removedReads.size(), nBad, "nBad " + nBad + " nGood " + nGood);
Assert.assertEquals(new HashSet<GATKSAMRecord>(removedReads), badReads, "nBad " + nBad + " nGood " + nGood);
Assert.assertEquals(map.size(), nGood, "nBad " + nBad + " nGood " + nGood);
Assert.assertTrue(map.getStoredElements().containsAll(goodReads), "nBad " + nBad + " nGood " + nGood);
Assert.assertEquals(map.getStoredElements().size(), nGood, "nBad " + nBad + " nGood " + nGood);
}
}

View File

@ -0,0 +1,77 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute.org/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as stated above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.Utils;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class HaplotypeBaseComparatorUnitTest extends BaseTest {
    @Test
    public void testComparison() {
        // Expected ordering is lexicographic over the haplotype base strings.
        final List<String> expected = new ArrayList<String>(Arrays.asList("A", "C", "AC", "CT", "GTC", "ACGT"));
        Collections.sort(expected);

        // Every permutation of the inputs must sort back to lexicographic order.
        for ( final List<String> permutation : Utils.makePermutations(expected, expected.size(), false) ) {
            final List<Haplotype> haplotypes = new ArrayList<Haplotype>(permutation.size());
            for ( final String bases : permutation )
                haplotypes.add(new Haplotype(bases.getBytes(), false));

            Collections.sort(haplotypes, new HaplotypeBaseComparator());

            for ( int i = 0; i < expected.size(); i++ )
                Assert.assertEquals(haplotypes.get(i).getBaseString(), expected.get(i), "Failed sort " + haplotypes + " expected " + expected);
        }
    }
}

View File

@ -0,0 +1,118 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute.org/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as stated above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import org.broadinstitute.sting.BaseTest;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
public class HaplotypeLDCalculatorUnitTest extends BaseTest {
    HaplotypeLDCalculator calculator;

    @BeforeMethod
    public void setUp() throws Exception {
        calculator = new HaplotypeLDCalculator();
    }

    /**
     * Tests that we get the right values from the R^2 calculation.
     *
     * Each pPhasedTest call takes the four log-likelihoods of the AA, AB, BA, BB
     * haplotype-pair states and returns the probability the two variants are phased.
     */
    @Test
    public void computeProbOfBeingPhased() {
        // Fixed: this previously logged the wrong test name ("testCalculateR2LD").
        logger.warn("Executing computeProbOfBeingPhased");
        // See AA, AB, and BA in population
        Assert.assertEquals(calculator.pPhasedTest(0, 0, 0, -100), 0, 0.00001);
        // See AA, AB, BB in population
        Assert.assertTrue(calculator.pPhasedTest(0, 0, -100, 0) < 0.5);
        // See AA and BB in population
        Assert.assertEquals(calculator.pPhasedTest(0, -100, -100, 0), 1, 0.00001);
        // See AA, AB, and BA but no BBs in population
        Assert.assertEquals(calculator.pPhasedTest(0, -20, -40, Double.NEGATIVE_INFINITY), 0, 0.00001);
        // See BB, AB, and BA but no AAs in population, so BB is the best explanation
        Assert.assertEquals(calculator.pPhasedTest(Double.NEGATIVE_INFINITY, -20, -40, 0), 1, 0.00001);
        // See only AB and BA but no AAs nor BBs in population
        Assert.assertEquals(calculator.pPhasedTest(Double.NEGATIVE_INFINITY, -20, -40, Double.NEGATIVE_INFINITY), 0, 0.00001);
        // Previously bad input
        Assert.assertEquals(calculator.pPhasedTest(-400, -600, -1200, Double.NEGATIVE_INFINITY), 0, 0.00001);
        // first variant is just bad, so BA and BB are both very bad, shouldn't be phased
        Assert.assertEquals(calculator.pPhasedTest(0, -1000, -100, -10000), 0, 0.00001);
        // second variant is just bad, so AB and BB are both very bad, shouldn't be phased
        Assert.assertEquals(calculator.pPhasedTest(0, -100, -1000, -10000), 0, 0.00001);
        // AA is very good, and all others are quite poor. Shouldn't be phased
        Assert.assertEquals(calculator.pPhasedTest(0, -1000, -1000, -10000), 0, 0.00001);

        // Sweep an increasingly bad log-likelihood through each state.
        for ( int i = -10; i > -10000; i -= 10 ) {
            // only bad het states
            Assert.assertTrue(calculator.pPhasedTest(0, i, i, 0) > 0.99, "Failed for " + i);
            // BB state is terrible
            Assert.assertTrue(calculator.pPhasedTest(0, 0, 0, i) < 0.5, "Failed for " + i);
            // truth is AB, BA, and BB
            Assert.assertTrue(calculator.pPhasedTest(i, 0, 0, 0) < 0.5, "Failed for " + i);
            // truth is AB, BA
            Assert.assertTrue(calculator.pPhasedTest(i, 0, 0, i) < 0.5, "Failed for " + i);
            // Only good signal is AB, so we shouldn't be phased
            Assert.assertTrue(calculator.pPhasedTest(i, i, 0, i) < 0.5, "Failed for " + i);
            Assert.assertTrue(calculator.pPhasedTest(i, 0, i, i) < 0.5, "Failed for " + i);
        }
    }
}

View File

@ -0,0 +1,76 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute.org/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as stated above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.Utils;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class HaplotypeScoreComparatorUnitTest extends BaseTest {
    @Test
    public void testComparison() {
        // After sorting with HaplotypeScoreComparator the haplotypes must come
        // back in score order 3.0, 2.0, 1.0 regardless of input permutation.
        final List<Double> expectedOrder = Arrays.asList(3.0, 2.0, 1.0);
        for ( final List<Double> shuffled : Utils.makePermutations(expectedOrder, expectedOrder.size(), false) ) {
            final List<Haplotype> haplotypes = new ArrayList<Haplotype>(shuffled.size());
            for ( final double score : shuffled ) {
                final Haplotype hap = new Haplotype("ACT".getBytes(), false);
                hap.setScore(score);
                haplotypes.add(hap);
            }

            Collections.sort(haplotypes, new HaplotypeScoreComparator());

            for ( int i = 0; i < expectedOrder.size(); i++ )
                Assert.assertEquals(haplotypes.get(i).getScore(), expectedOrder.get(i));
        }
    }
}

View File

@ -0,0 +1,334 @@
/*
* By downloading the PROGRAM you agree to the following terms of use:
*
* BROAD INSTITUTE - SOFTWARE LICENSE AGREEMENT - FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLY
*
* This Agreement is made between the Broad Institute, Inc. with a principal address at 7 Cambridge Center, Cambridge, MA 02142 (BROAD) and the LICENSEE and is effective at the date the downloading is completed (EFFECTIVE DATE).
*
* WHEREAS, LICENSEE desires to license the PROGRAM, as defined hereinafter, and BROAD wishes to have this PROGRAM utilized in the public interest, subject only to the royalty-free, nonexclusive, nontransferable license rights of the United States Government pursuant to 48 CFR 52.227-14; and
* WHEREAS, LICENSEE desires to license the PROGRAM and BROAD desires to grant a license on the following terms and conditions.
* NOW, THEREFORE, in consideration of the promises and covenants made herein, the parties hereto agree as follows:
*
* 1. DEFINITIONS
* 1.1 PROGRAM shall mean copyright in the object code and source code known as GATK2 and related documentation, if any, as they exist on the EFFECTIVE DATE and can be downloaded from http://www.broadinstitute/GATK on the EFFECTIVE DATE.
*
* 2. LICENSE
* 2.1 Grant. Subject to the terms of this Agreement, BROAD hereby grants to LICENSEE, solely for academic non-commercial research purposes, a non-exclusive, non-transferable license to: (a) download, execute and display the PROGRAM and (b) create bug fixes and modify the PROGRAM.
* The LICENSEE may apply the PROGRAM in a pipeline to data owned by users other than the LICENSEE and provide these users the results of the PROGRAM provided LICENSEE does so for academic non-commercial purposes only. For clarification purposes, academic sponsored research is not a commercial use under the terms of this Agreement.
* 2.2 No Sublicensing or Additional Rights. LICENSEE shall not sublicense or distribute the PROGRAM, in whole or in part, without prior written permission from BROAD. LICENSEE shall ensure that all of its users agree to the terms of this Agreement. LICENSEE further agrees that it shall not put the PROGRAM on a network, server, or other similar technology that may be accessed by anyone other than the LICENSEE and its employees and users who have agreed to the terms of this agreement.
* 2.3 License Limitations. Nothing in this Agreement shall be construed to confer any rights upon LICENSEE by implication, estoppel, or otherwise to any computer software, trademark, intellectual property, or patent rights of BROAD, or of any other entity, except as expressly granted herein. LICENSEE agrees that the PROGRAM, in whole or part, shall not be used for any commercial purpose, including without limitation, as the basis of a commercial software or hardware product or to provide services. LICENSEE further agrees that the PROGRAM shall not be copied or otherwise adapted in order to circumvent the need for obtaining a license for use of the PROGRAM.
*
* 3. OWNERSHIP OF INTELLECTUAL PROPERTY
* LICENSEE acknowledges that title to the PROGRAM shall remain with BROAD. The PROGRAM is marked with the following BROAD copyright notice and notice of attribution to contributors. LICENSEE shall retain such notice on all copies. LICENSEE agrees to include appropriate attribution if any results obtained from use of the PROGRAM are included in any publication.
* Copyright 2012 Broad Institute, Inc.
* Notice of attribution: The GATK2 program was made available through the generosity of Medical and Population Genetics program at the Broad Institute, Inc.
* LICENSEE shall not use any trademark or trade name of BROAD, or any variation, adaptation, or abbreviation, of such marks or trade names, or any names of officers, faculty, students, employees, or agents of BROAD except as states above for attribution purposes.
*
* 4. INDEMNIFICATION
* LICENSEE shall indemnify, defend, and hold harmless BROAD, and their respective officers, faculty, students, employees, associated investigators and agents, and their respective successors, heirs and assigns, (Indemnitees), against any liability, damage, loss, or expense (including reasonable attorneys fees and expenses) incurred by or imposed upon any of the Indemnitees in connection with any claims, suits, actions, demands or judgments arising out of any theory of liability (including, without limitation, actions in the form of tort, warranty, or strict liability and regardless of whether such action has any factual basis) pursuant to any right or license granted under this Agreement.
*
* 5. NO REPRESENTATIONS OR WARRANTIES
* THE PROGRAM IS DELIVERED AS IS. BROAD MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE PROGRAM OR THE COPYRIGHT, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, WHETHER OR NOT DISCOVERABLE. BROAD EXTENDS NO WARRANTIES OF ANY KIND AS TO PROGRAM CONFORMITY WITH WHATEVER USER MANUALS OR OTHER LITERATURE MAY BE ISSUED FROM TIME TO TIME.
* IN NO EVENT SHALL BROAD OR ITS RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AFFILIATED INVESTIGATORS AND AFFILIATES BE LIABLE FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, INCLUDING, WITHOUT LIMITATION, ECONOMIC DAMAGES OR INJURY TO PROPERTY AND LOST PROFITS, REGARDLESS OF WHETHER BROAD SHALL BE ADVISED, SHALL HAVE OTHER REASON TO KNOW, OR IN FACT SHALL KNOW OF THE POSSIBILITY OF THE FOREGOING.
*
* 6. ASSIGNMENT
* This Agreement is personal to LICENSEE and any rights or obligations assigned by LICENSEE without the prior written consent of BROAD shall be null and void.
*
* 7. MISCELLANEOUS
* 7.1 Export Control. LICENSEE gives assurance that it will comply with all United States export control laws and regulations controlling the export of the PROGRAM, including, without limitation, all Export Administration Regulations of the United States Department of Commerce. Among other things, these laws and regulations prohibit, or require a license for, the export of certain types of software to specified countries.
* 7.2 Termination. LICENSEE shall have the right to terminate this Agreement for any reason upon prior written notice to BROAD. If LICENSEE breaches any provision hereunder, and fails to cure such breach within thirty (30) days, BROAD may terminate this Agreement immediately. Upon termination, LICENSEE shall provide BROAD with written assurance that the original and all copies of the PROGRAM have been destroyed, except that, upon prior written authorization from BROAD, LICENSEE may retain a copy for archive purposes.
* 7.3 Survival. The following provisions shall survive the expiration or termination of this Agreement: Articles 1, 3, 4, 5 and Sections 2.2, 2.3, 7.3, and 7.4.
* 7.4 Notice. Any notices under this Agreement shall be in writing, shall specifically refer to this Agreement, and shall be sent by hand, recognized national overnight courier, confirmed facsimile transmission, confirmed electronic mail, or registered or certified mail, postage prepaid, return receipt requested. All notices under this Agreement shall be deemed effective upon receipt.
* 7.5 Amendment and Waiver; Entire Agreement. This Agreement may be amended, supplemented, or otherwise modified only by means of a written instrument signed by all parties. Any waiver of any rights or failure to act in a specific instance shall relate only to such instance and shall not be construed as an agreement to waive any rights or fail to act in any other instance, whether or not similar. This Agreement constitutes the entire agreement among the parties with respect to its subject matter and supersedes prior agreements or understandings between the parties relating to its subject matter.
* 7.6 Binding Effect; Headings. This Agreement shall be binding upon and inure to the benefit of the parties and their respective permitted successors and assigns. All headings are for convenience only and shall not affect the meaning of any provision of this Agreement.
* 7.7 Governing Law. This Agreement shall be construed, governed, interpreted and applied in accordance with the internal laws of the Commonwealth of Massachusetts, U.S.A., without regard to conflict of laws principles.
*/
package org.broadinstitute.sting.utils.haplotype;
import net.sf.samtools.TextCigarCodec;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.*;
import org.broadinstitute.sting.utils.fasta.CachingIndexedFastaSequenceFile;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;
public class LDMergerUnitTest extends BaseTest {
    LDMerger merger;
    GenomeLocParser genomeLocParser;

    @BeforeClass
    public void init() throws FileNotFoundException {
        genomeLocParser = new GenomeLocParser(new CachingIndexedFastaSequenceFile(new File(b37KGReference)));
    }

    @BeforeMethod
    public void setUp() throws Exception {
        merger = new LDMerger();
    }

    /**
     * Merges thisVC with nextVC via the LDMerger and asserts that the merged record has
     * the same alleles, start, and stop as truthVC.
     *
     * @param thisVC  the upstream event to merge
     * @param nextVC  the downstream event to merge
     * @param truthVC the expected merged variant context
     * @param ref     the reference bases spanning both events
     * @param refLoc  the genomic span of ref
     */
    private void assertMergeMatchesTruth(final VariantContext thisVC, final VariantContext nextVC,
                                         final VariantContext truthVC, final byte[] ref, final GenomeLoc refLoc) {
        final VariantContext mergedVC = merger.createMergedVariantContext(thisVC, nextVC, ref, refLoc);
        logger.warn(truthVC + " == " + mergedVC);
        Assert.assertTrue(truthVC.hasSameAllelesAs(mergedVC));
        Assert.assertEquals(truthVC.getStart(), mergedVC.getStart());
        Assert.assertEquals(truthVC.getEnd(), mergedVC.getEnd());
    }

    /**
     * Exercises createMergedVariantContext over every pairing of event types
     * (SNP, insertion, deletion, complex), checking alleles and coordinates of the merge.
     */
    @Test
    public void testCreateMergedVariantContext() {
        logger.warn("Executing testCreateMergedVariantContext");
        final byte[] ref = "AATTCCGGAATTCCGGAATT".getBytes();
        final GenomeLoc refLoc = genomeLocParser.createGenomeLoc("2", 1700, 1700 + ref.length);

        // SNP + SNP = simple MNP
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
                new VariantContextBuilder().loc("2", 1704, 1704).alleles("C","G").make(),
                new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","GG").source("merged").make(),
                ref, refLoc);

        // SNP + ref + SNP = MNP with ref base gap
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
                new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
                new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","GCG").source("merged").make(),
                ref, refLoc);

        // insertion + SNP
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TAAAAA").make(),
                new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
                new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TAAAAACG").source("merged").make(),
                ref, refLoc);

        // SNP + insertion
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
                new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","CAAAAA").make(),
                new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","GCCAAAAA").source("merged").make(),
                ref, refLoc);

        // deletion + SNP
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","T").make(),
                new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","G").make(),
                new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TG").source("merged").make(),
                ref, refLoc);

        // SNP + deletion
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","G").make(),
                new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
                new VariantContextBuilder().loc("2", 1703, 1706).alleles("TCCG","GCC").source("merged").make(),
                ref, refLoc);

        // insertion + deletion = MNP
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TA").make(),
                new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
                new VariantContextBuilder().loc("2", 1704, 1706).alleles("CCG","ACC").source("merged").make(),
                ref, refLoc);

        // insertion + deletion
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TAAAAA").make(),
                new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
                new VariantContextBuilder().loc("2", 1703, 1706).alleles("TCCG","TAAAAACC").source("merged").make(),
                ref, refLoc);

        // insertion + insertion
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1703).alleles("T","TA").make(),
                new VariantContextBuilder().loc("2", 1705, 1705).alleles("C","CA").make(),
                new VariantContextBuilder().loc("2", 1703, 1705).alleles("TCC","TACCA").source("merged").make(),
                ref, refLoc);

        // deletion + deletion
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","A").make(),
                new VariantContextBuilder().loc("2", 1705, 1706).alleles("CG","C").make(),
                new VariantContextBuilder().loc("2", 1701, 1706).alleles("ATTCCG","ATCC").source("merged").make(),
                ref, refLoc);

        // deletion + insertion (abutting)
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","A").make(),
                new VariantContextBuilder().loc("2", 1702, 1702).alleles("T","GCGCGC").make(),
                new VariantContextBuilder().loc("2", 1701, 1702).alleles("AT","AGCGCGC").source("merged").make(),
                ref, refLoc);

        // complex + complex
        assertMergeMatchesTruth(
                new VariantContextBuilder().loc("2", 1703, 1704).alleles("TC","AAA").make(),
                new VariantContextBuilder().loc("2", 1706, 1707).alleles("GG","AC").make(),
                new VariantContextBuilder().loc("2", 1703, 1707).alleles("TCCGG","AAACAC").source("merged").make(),
                ref, refLoc);
    }

    /**
     * A merge whose insertion cancels the deletion would produce a null allele;
     * the merger must detect this and return null instead of a merged context.
     */
    @Test
    public void testInsertionDeletionBecomingNullAllele() {
        final byte[] ref = "CAAA".getBytes();
        final GenomeLoc refLoc = genomeLocParser.createGenomeLoc("2", 1700, 1700 + ref.length);

        // insertion + deletion results in a null allele, should return false
        final VariantContext thisVC = new VariantContextBuilder().loc("2", 1700, 1701).alleles("CA","C").make();
        final VariantContext nextVC = new VariantContextBuilder().loc("2", 1703, 1703).alleles("A","AA").make();
        final VariantContext mergedVC = merger.createMergedVariantContext(thisVC, nextVC, ref, refLoc);
        Assert.assertNull(mergedVC, "Insertion deletion becoming a null allele should return a null variant context");
    }

    /**
     * Just returns a given R2 value for testing
     */
    private static class MockLDCalculator extends HaplotypeLDCalculator {
        private final double R2;

        private MockLDCalculator(double r2) {
            R2 = r2;
        }

        @Override
        protected double computeProbOfBeingPhased(VariantContext first, VariantContext second) {
            return R2;
        }
    }

    /**
     * Creates a haplotype from the given bases with the given cigar, anchored at alignment start 0.
     *
     * @param bases the haplotype bases
     * @param isRef is this the reference haplotype?
     * @param cigar the cigar string describing the alignment of bases to the reference
     * @return a non-null Haplotype
     */
    private static Haplotype makeHaplotype(final String bases, final boolean isRef, final String cigar) {
        return new Haplotype(bases.getBytes(), isRef, 0, TextCigarCodec.getSingleton().decode(cigar));
    }

    @DataProvider(name = "R2MergerData")
    public Object[][] makeR2MergerData() {
        List<Object[]> tests = new ArrayList<Object[]>();

        // Each row: ref bases, hap bases, expected number of events, hap cigar,
        // expected merged ref allele, expected merged alt allele, mock R2 value, expect merge?
        final double thres = LDMerger.MERGE_EVENTS_PROB_PHASED_THRESHOLD;
        for ( final double r2 : Arrays.asList(0.0, thres - 0.01, thres + 0.01, 1.0) ) {
            tests.add(new Object[]{"ACGT", "CCGC", 2, "4M", "ACGT", "CCGC", r2, r2 >= thres});
            tests.add(new Object[]{"ACGT", "AGGC", 2, "4M", "CGT", "GGC", r2, r2 >= thres});
            tests.add(new Object[]{"ACGT", "ACCC", 2, "4M", "GT", "CC", r2, r2 >= thres});
            tests.add(new Object[]{"ACGT", "ACCGTT", 2, "2M1I1M1I1M", "CG", "CCGT", r2, r2 >= thres});
            tests.add(new Object[]{"ACGT", "AGCT", 2, "4M", "CG", "GC", r2, r2 >= thres});
            tests.add(new Object[]{"ACAGT", "AAGC", 2, "1M1D3M", "ACAGT", "AAGC", r2, r2 >= thres});
            tests.add(new Object[]{"ACAGT", "AAT", 2, "1M1D1M1D1M", "ACAG", "AA", r2, r2 >= thres});

            // cannot be merged -- only 1 event
            tests.add(new Object[]{"AAA", "ACA", 1, "3M", null, null, r2, false});

            // cannot be merged -- events too far apart
            final int dist = LDMerger.MAX_DISTANCE_BETWEEN_SNPS_TO_MERGE + 2;
            tests.add(new Object[]{Utils.dupString("A", dist), "C" + Utils.dupString("A", dist - 2) + "C", 2, dist + "M", null, null, r2, false});
        }

        return tests.toArray(new Object[][]{});
    }

    /**
     * Checks that mergeConsecutiveEventsBasedOnLDOnce merges (or refuses to merge) a
     * haplotype's pair of events depending on the mocked R2 value and the event spacing.
     */
    @Test(dataProvider = "R2MergerData")
    public void testR2Merger(final String refS, final String hapS, int nEvents, final String cigar, final String expectedMergedRef, final String expectedMergedAlt, final double r2, final boolean expectMerge) {
        final Haplotype ref = makeHaplotype(refS, true, refS.length() + "M");
        final Haplotype hap = makeHaplotype(hapS, false, cigar);
        final GenomeLoc loc = new UnvalidatingGenomeLoc("1", 0, 1, ref.length());
        final List<Haplotype> haplotypes = Arrays.asList(ref, hap);
        final TreeSet<Integer> vcStarts = EventMap.buildEventMapsForHaplotypes(haplotypes, ref.getBases(), loc, false);
        final MockLDCalculator r2Calc = new MockLDCalculator(r2);

        Assert.assertEquals(vcStarts.size(), nEvents);
        final boolean merged = merger.mergeConsecutiveEventsBasedOnLDOnce(haplotypes, r2Calc, 1, vcStarts, ref.getBases(), loc);
        Assert.assertEquals(merged, expectMerge);
        Assert.assertEquals(vcStarts.size(), expectMerge ? 1 : nEvents);

        if ( expectMerge ) {
            final VariantContext vc = hap.getEventMap().getVariantContexts().iterator().next();
            Assert.assertTrue(vc.isBiallelic());
            Assert.assertEquals(vc.getReference().getDisplayString(), expectedMergedRef);
            Assert.assertEquals(vc.getAlternateAllele(0).getDisplayString(), expectedMergedAlt);
        }
    }

    /**
     * A third haplotype carrying only one of the two events must not block the merge,
     * and its own event map must end up empty after merging.
     */
    @Test
    public void testR2MergerWithThirdHapWithoutEvent() {
        final String refS = "ACGT";
        final String hapS = "CCGA";
        final String cigar = "4M";
        final Haplotype ref = makeHaplotype(refS, true, refS.length() + "M");
        final Haplotype hap1 = makeHaplotype(hapS, false, cigar);
        final Haplotype hap2 = makeHaplotype("ACGA", false, cigar);
        final GenomeLoc loc = new UnvalidatingGenomeLoc("1", 0, 1, ref.length());
        final List<Haplotype> haplotypes = Arrays.asList(ref, hap1, hap2);
        final TreeSet<Integer> vcStarts = EventMap.buildEventMapsForHaplotypes(haplotypes, ref.getBases(), loc, false);
        final MockLDCalculator r2Calc = new MockLDCalculator(1.0);

        Assert.assertEquals(vcStarts.size(), 2);
        final boolean merged = merger.mergeConsecutiveEventsBasedOnLDOnce(haplotypes, r2Calc, 1, vcStarts, ref.getBases(), loc);
        Assert.assertEquals(merged, true);
        Assert.assertEquals(vcStarts.size(), 1);

        final VariantContext vc = hap1.getEventMap().getVariantContexts().iterator().next();
        Assert.assertTrue(vc.isBiallelic());
        Assert.assertEquals(vc.getReference().getDisplayString(), "ACGT");
        Assert.assertEquals(vc.getAlternateAllele(0).getDisplayString(), "CCGA");
        Assert.assertEquals(hap2.getEventMap().size(), 0);
    }

    /**
     * When a second haplotype carries a different allele at either merged site,
     * the events are multi-allelic and must not be merged.
     */
    @Test
    public void testR2MergerWithMultipleAllelesAtSites() {
        final String refS = "ACGT";
        final String hapS = "TCGA";
        final String cigar = "4M";
        final Haplotype ref = makeHaplotype(refS, true, refS.length() + "M");
        final Haplotype hap1 = makeHaplotype(hapS, false, cigar);
        final GenomeLoc loc = new UnvalidatingGenomeLoc("1", 0, 1, ref.length());

        for (final String hap2S : Arrays.asList("GCGA", "TCGG")) {
            final Haplotype hap2 = makeHaplotype(hap2S, false, cigar);
            final List<Haplotype> haplotypes = Arrays.asList(ref, hap1, hap2);
            final TreeSet<Integer> vcStarts = EventMap.buildEventMapsForHaplotypes(haplotypes, ref.getBases(), loc, false);
            final MockLDCalculator r2Calc = new MockLDCalculator(1.0);

            Assert.assertEquals(vcStarts.size(), 2);
            final boolean merged = merger.mergeConsecutiveEventsBasedOnLDOnce(haplotypes, r2Calc, 1, vcStarts, ref.getBases(), loc);
            Assert.assertEquals(merged, false);
            Assert.assertEquals(vcStarts.size(), 2);
        }
    }
}

View File

@ -487,6 +487,7 @@ public class TraverseActiveRegions<M, T> extends TraversalEngine<M,T,ActiveRegio
final AlignmentContext locus) {
// must be called, even if we won't use the result, to satisfy walker contract
final ActivityProfileState state = walker.isActive( tracker, refContext, locus );
if ( walker.forceActive) state.isActiveProb = 1.0;
if ( ! walkerHasPresetRegions ) {
activityProfile.add(state);
}

View File

@ -88,6 +88,14 @@ public abstract class ActiveRegionWalker<MapType, ReduceType> extends Walker<Map
@Argument(fullName="activeRegionExtension", shortName="activeRegionExtension", doc="The active region extension; if not provided defaults to Walker annotated default", required = false)
public Integer activeRegionExtension = null;
/**
* Forces the active region walker to treat all bases as active. Useful for debugging when you want to force a walker
* such as the HaplotypeCaller to process a specific interval you provide to the GATK
*/
@Advanced
@Argument(fullName="forceActive", shortName="forceActive", doc="If provided, all bases will be tagged as active", required = false)
public boolean forceActive = false;
@Advanced
@Argument(fullName="activeRegionMaxSize", shortName="activeRegionMaxSize", doc="The active region maximum size; if not provided defaults to Walker annotated default", required = false)
public Integer activeRegionMaxSize = null;

View File

@ -149,7 +149,7 @@ public class ActiveRegion implements HasGenomeLocation {
@Override
public String toString() {
return "ActiveRegion " + activeRegionLoc.toString() + " active?=" + isActive() + " nReads=" + reads.size() + " ";
return "ActiveRegion " + activeRegionLoc.toString() + " active?=" + isActive() + " nReads=" + reads.size();
}
/**
@ -374,6 +374,8 @@ public class ActiveRegion implements HasGenomeLocation {
*
* Note that the returned list may be empty, if this active region doesn't overlap the set at all
*
* Note that the resulting regions are all empty, regardless of whether the current active region has reads
*
* @param intervals a non-null set of intervals that are allowed
* @return an ordered list of active region where each interval is contained within intervals
*/
@ -383,14 +385,59 @@ public class ActiveRegion implements HasGenomeLocation {
final List<ActiveRegion> clippedRegions = new LinkedList<ActiveRegion>();
for ( final GenomeLoc overlapping : allOverlapping ) {
final GenomeLoc subLoc = getLocation().intersect(overlapping);
final int subStart = subLoc.getStart() - getLocation().getStart();
final int subEnd = subStart + subLoc.size();
final List<ActivityProfileState> subStates = supportingStates.isEmpty() ? supportingStates : supportingStates.subList(subStart, subEnd);
final ActiveRegion clipped = new ActiveRegion( subLoc, subStates, isActive, genomeLocParser, extension );
clippedRegions.add(clipped);
clippedRegions.add(trim(overlapping, extension));
}
return clippedRegions;
}
/**
* Trim this active region to just the newExtent, producing a new active region without any reads that has only
* the extent of newExtent intersected with the current extent
* @param newExtent the new extent of the active region we want
* @param newExtension the extension size we want for the newly trimmed active region
* @return a non-null, empty active region
*/
public ActiveRegion trim(final GenomeLoc newExtent, final int newExtension) {
if ( newExtent == null ) throw new IllegalArgumentException("Active region extent cannot be null");
final GenomeLoc subLoc = getLocation().intersect(newExtent);
// offsets of the trimmed span relative to the start of this region, used to slice supportingStates
final int subStart = subLoc.getStart() - getLocation().getStart();
final int subEnd = subStart + subLoc.size();
// supportingStates may legitimately be empty; subList would fail on out-of-range indices in that case
final List<ActivityProfileState> subStates = supportingStates.isEmpty() ? supportingStates : supportingStates.subList(subStart, subEnd);
return new ActiveRegion( subLoc, subStates, isActive, genomeLocParser, newExtension );
}
/**
* Trim this active region to no more than the newExtent, producing a new active region without any reads that
* attempts to provide the best possible representation of this active region covering the newExtent.
*
* The challenge here is that newExtent may (1) be larger than can be represented by this active region
* + its original extension and (2) the extension must be symmetric on both sides. This algorithm
* therefore determines how best to represent newExtent as a subset of the span of this
* region with a padding value that captures as much of the newExtent as possible.
*
* For example, suppose this active region is
*
* Active: 100-200 with extension of 50, so that the true span is 50-250
* NewExtent: 150-225 saying that we'd ideally like to just have bases 150-225
*
* Here we represent the active region as a active region from 150-200 with 25 bp of padding.
*
* The overall constraint is that the active region can never exceed the original active region, and
* the extension is chosen to maximize overlap with the desired region
*
* @param newExtent the new extent of the active region we want
* @return a non-null, empty active region
*/
public ActiveRegion trim(final GenomeLoc newExtent) {
if ( newExtent == null ) throw new IllegalArgumentException("Active region extent cannot be null");
final GenomeLoc subActive = getLocation().intersect(newExtent);
// how many bases of the desired extent fall outside the trimmed active span on each side
final int requiredOnRight = Math.max(newExtent.getStop() - subActive.getStop(), 0);
final int requiredOnLeft = Math.max(subActive.getStart() - newExtent.getStart(), 0);
// symmetric padding: large enough to cover the worse side, but never beyond the original extension
final int requiredExtension = Math.min(Math.max(requiredOnLeft, requiredOnRight), getExtension());
return new ActiveRegion( subActive, Collections.<ActivityProfileState>emptyList(), isActive, genomeLocParser, requiredExtension );
}
}

View File

@ -251,4 +251,62 @@ public class PerReadAlleleLikelihoodMap {
}
return sb.toString();
}
/**
* Remove reads from this map that are poorly modelled w.r.t. their per allele likelihoods
*
* Goes through each read in this map, and if it is poorly modelled removes it from the map.
*
* @see #readIsPoorlyModelled(org.broadinstitute.sting.utils.sam.GATKSAMRecord, java.util.Collection, double)
* for more information about the poorly modelled test.
*
* @param maxErrorRatePerBase see equivalent parameter in #readIsPoorlyModelled
* @return the list of reads removed from this map because they are poorly modelled
*/
public List<GATKSAMRecord> filterPoorlyModelledReads(final double maxErrorRatePerBase) {
    final List<GATKSAMRecord> droppedReads = new LinkedList<GATKSAMRecord>();

    // use an explicit iterator so entries can be removed from the map while walking it
    final Iterator<Map.Entry<GATKSAMRecord, Map<Allele, Double>>> entries = likelihoodReadMap.entrySet().iterator();
    while ( entries.hasNext() ) {
        final Map.Entry<GATKSAMRecord, Map<Allele, Double>> entry = entries.next();
        final GATKSAMRecord read = entry.getKey();
        if ( readIsPoorlyModelled(read, entry.getValue().values(), maxErrorRatePerBase) ) {
            entries.remove();
            droppedReads.add(read);
        }
    }

    return droppedReads;
}
/**
* Is this read poorly modelled by all of the alleles in this map?
*
* A read is poorly modelled when its likelihood is below what would be expected for a read
* originating from one of the alleles given the maxErrorRatePerBase of the reads in general.
*
* This function makes a number of key assumptions. First, that the likelihoods reflect the total likelihood
* of the read. In other words, that the read would be fully explained by one of the alleles. This means
* that the allele should be something like the full haplotype from which the read might originate.
*
* It further assumes that each error in the read occurs with likelihood of -3 (Q30 confidence per base). So
* a read with a 10% error rate with Q30 bases that's 100 bp long we'd expect to see 10 real Q30 errors
* even against the true haplotype. So for this read to be well modelled by at least one allele we'd expect
* a likelihood to be >= 10 * -3.
*
* @param read the read we want to evaluate
* @param log10Likelihoods a list of the log10 likelihoods of the read against a set of haplotypes.
* @param maxErrorRatePerBase the maximum error rate we'd expect for this read per base, in real space. So
* 0.01 means a 1% error rate
* @return true if none of the log10 likelihoods imply that the read truly originated from one of the haplotypes
*/
protected boolean readIsPoorlyModelled(final GATKSAMRecord read, final Collection<Double> log10Likelihoods, final double maxErrorRatePerBase) {
    // expected number of real sequencing errors for a read of this length at the given error rate
    final double expectedErrorCount = Math.ceil(read.getReadLength() * maxErrorRatePerBase);
    // each expected error costs -3.0 (i.e. ~Q30) in log10 likelihood space
    final double minAcceptableLog10Likelihood = expectedErrorCount * (-3.0);

    for ( final double log10L : log10Likelihoods ) {
        if ( log10L >= minAcceptableLog10Likelihood )
            return false; // at least one allele explains this read well enough
    }

    return true; // no allele reached the minimum acceptable likelihood
}
}

View File

@ -0,0 +1,416 @@
/*
* Copyright (c) 2012 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils.haplotype;
import com.google.java.contract.Ensures;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.broadinstitute.sting.utils.BaseUtils;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import java.util.*;
/**
* Extract simple VariantContext events from a single haplotype
*
* User: depristo
* Date: 3/27/13
* Time: 8:35 AM
*/
/**
 * Extract simple VariantContext events from a single haplotype.
 *
 * The map itself is a TreeMap keyed by the event's genomic start position, so iteration
 * over keys/values is always in start-position order.
 *
 * User: depristo
 * Date: 3/27/13
 * Time: 8:35 AM
 */
public class EventMap extends TreeMap<Integer, VariantContext> {
    private final static Logger logger = Logger.getLogger(EventMap.class);

    // minimum number of nearby events before we consider collapsing them into one block substitution
    protected final static int MIN_NUMBER_OF_EVENTS_TO_COMBINE_INTO_BLOCK_SUBSTITUTION = 3;

    // symbolic alt allele formerly emitted for insertions that weren't fully assembled in the
    // haplotype; kept public for compatibility but no longer produced (see processCigarForInitialEvents)
    public final static Allele SYMBOLIC_UNASSEMBLED_EVENT_ALLELE = Allele.create("<UNASSEMBLED_EVENT>", false);

    private final Haplotype haplotype;    // the haplotype whose differences vs. the reference we record
    private final byte[] ref;             // reference bases spanning refLoc
    private final GenomeLoc refLoc;       // genomic span corresponding to ref
    private final String sourceNameToAdd; // source name stamped on every VariantContext created here

    /**
     * Build the event map for a haplotype by walking its cigar against the reference.
     *
     * @param haplotype the haplotype to extract events from
     * @param ref the reference bases the haplotype is aligned to
     * @param refLoc the genomic span of ref
     * @param sourceNameToAdd source name to use for the created VariantContexts
     */
    public EventMap(final Haplotype haplotype, final byte[] ref, final GenomeLoc refLoc, final String sourceNameToAdd) {
        super();
        this.haplotype = haplotype;
        this.ref = ref;
        this.refLoc = refLoc;
        this.sourceNameToAdd = sourceNameToAdd;
        processCigarForInitialEvents();
    }

    /**
     * For testing. Lets you set up an explicit configuration without having to process a haplotype and reference
     * @param stateForTesting the variant contexts to seed this map with
     */
    protected EventMap(final Collection<VariantContext> stateForTesting) {
        haplotype = null;
        ref = null;
        refLoc = null;
        sourceNameToAdd = null;
        for ( final VariantContext vc : stateForTesting )
            addVC(vc);
    }

    /**
     * Walk the haplotype's cigar against the reference and add one VariantContext per
     * SNP, insertion, or deletion observed.  Indel events are left-padded with the
     * preceding reference base, following VCF convention.
     */
    protected void processCigarForInitialEvents() {
        final Cigar cigar = haplotype.getCigar();
        final byte[] alignment = haplotype.getBases();

        int refPos = haplotype.getAlignmentStartHapwrtRef();
        if( refPos < 0 ) {
            return;
        } // Protection against SW failures

        int alignmentPos = 0;

        for( int cigarIndex = 0; cigarIndex < cigar.numCigarElements(); cigarIndex++ ) {
            final CigarElement ce = cigar.getCigarElement(cigarIndex);
            final int elementLength = ce.getLength();
            switch( ce.getOperator() ) {
                case I:
                {
                    if( refPos > 0 ) { // protect against trying to create insertions/deletions at the beginning of a contig
                        final List<Allele> insertionAlleles = new ArrayList<Allele>();
                        final int insertionStart = refLoc.getStart() + refPos - 1; // anchor at the padding base
                        final byte refByte = ref[refPos-1];
                        if( BaseUtils.isRegularBase(refByte) ) {
                            insertionAlleles.add( Allele.create(refByte, true) );
                        }
                        if( cigarIndex == 0 || cigarIndex == cigar.getCigarElements().size() - 1 ) {
                            // if the insertion isn't completely resolved in the haplotype, skip it
                            // note this used to emit SYMBOLIC_UNASSEMBLED_EVENT_ALLELE but that seems dangerous
                        } else {
                            byte[] insertionBases = new byte[]{};
                            insertionBases = ArrayUtils.add(insertionBases, ref[refPos - 1]); // add the padding base
                            insertionBases = ArrayUtils.addAll(insertionBases, Arrays.copyOfRange(alignment, alignmentPos, alignmentPos + elementLength));
                            if( BaseUtils.isAllRegularBases(insertionBases) ) {
                                insertionAlleles.add( Allele.create(insertionBases, false) );
                            }
                        }
                        if( insertionAlleles.size() == 2 ) { // found a proper ref and alt allele
                            addVC(new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), insertionStart, insertionStart, insertionAlleles).make());
                        }
                    }
                    alignmentPos += elementLength; // insertions consume haplotype bases but no reference
                    break;
                }
                case S:
                {
                    alignmentPos += elementLength; // soft clips consume haplotype bases but no reference
                    break;
                }
                case D:
                {
                    if( refPos > 0 ) { // protect against trying to create insertions/deletions at the beginning of a contig
                        final byte[] deletionBases = Arrays.copyOfRange( ref, refPos - 1, refPos + elementLength );  // add padding base
                        final List<Allele> deletionAlleles = new ArrayList<Allele>();
                        final int deletionStart = refLoc.getStart() + refPos - 1; // anchor at the padding base
                        final byte refByte = ref[refPos-1];
                        if( BaseUtils.isRegularBase(refByte) && BaseUtils.isAllRegularBases(deletionBases) ) {
                            deletionAlleles.add( Allele.create(deletionBases, true) );
                            deletionAlleles.add( Allele.create(refByte, false) );
                            addVC(new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), deletionStart, deletionStart + elementLength, deletionAlleles).make());
                        }
                    }
                    refPos += elementLength; // deletions consume reference bases but no haplotype bases
                    break;
                }
                case M:
                case EQ:
                case X:
                {
                    // walk matching bases one at a time, emitting a biallelic SNP for each mismatch
                    for( int iii = 0; iii < elementLength; iii++ ) {
                        final byte refByte = ref[refPos];
                        final byte altByte = alignment[alignmentPos];
                        if( refByte != altByte ) { // SNP!
                            if( BaseUtils.isRegularBase(refByte) && BaseUtils.isRegularBase(altByte) ) {
                                final List<Allele> snpAlleles = new ArrayList<Allele>();
                                snpAlleles.add( Allele.create( refByte, true ) );
                                snpAlleles.add( Allele.create( altByte, false ) );
                                addVC(new VariantContextBuilder(sourceNameToAdd, refLoc.getContig(), refLoc.getStart() + refPos, refLoc.getStart() + refPos, snpAlleles).make());
                            }
                        }
                        refPos++;
                        alignmentPos++;
                    }
                    break;
                }
                case N:
                case H:
                case P:
                default:
                    throw new ReviewedStingException( "Unsupported cigar operator created during SW alignment: " + ce.getOperator() );
            }
        }
    }

    /**
     * Add VariantContext vc to this map, merging events with the same start sites if necessary
     * @param vc the variant context to add
     */
    protected void addVC(final VariantContext vc) {
        addVC(vc, true);
    }

    /**
     * Add VariantContext vc to this map
     * @param vc the variant context to add
     * @param merge should we attempt to merge it with an already existing element, or should we throw an error in that case?
     * @throws IllegalArgumentException if vc is null
     * @throws IllegalStateException if merge is false and an event already exists at vc's start
     */
    protected void addVC(final VariantContext vc, final boolean merge) {
        if ( vc == null ) throw new IllegalArgumentException("vc cannot be null");

        if ( containsKey(vc.getStart()) ) {
            if ( merge ) {
                // two events with the same start (e.g. a SNP and an indel) become one block substitution
                final VariantContext prev = get(vc.getStart());
                put(vc.getStart(), makeBlock(prev, vc));
            } else {
                throw new IllegalStateException("Will not merge previously bound variant contexts as merge is false at " + vc);
            }
        } else
            put(vc.getStart(), vc);
    }

    /**
     * Create a block substitution out of two variant contexts that start at the same position
     *
     * vc1 can be SNP, and vc2 can then be either a insertion or deletion.
     * If vc1 is an indel, then vc2 must be the opposite type (vc1 deletion => vc2 must be an insertion)
     *
     * @param vc1 the first variant context we want to merge
     * @param vc2 the second
     * @return a block substitution that represents the composite substitution implied by vc1 and vc2
     * @throws IllegalArgumentException if the two contexts don't satisfy the constraints above
     */
    protected VariantContext makeBlock(final VariantContext vc1, final VariantContext vc2) {
        if ( vc1.getStart() != vc2.getStart() ) throw new IllegalArgumentException("vc1 and 2 must have the same start but got " + vc1 + " and " + vc2);
        if ( ! vc1.isBiallelic() ) throw new IllegalArgumentException("vc1 must be biallelic");
        if ( ! vc1.isSNP() ) {
            if ( ! ((vc1.isSimpleDeletion() && vc2.isSimpleInsertion()) || (vc1.isSimpleInsertion() && vc2.isSimpleDeletion())))
                throw new IllegalArgumentException("Can only merge single insertion with deletion (or vice versa) but got " + vc1 + " merging with " + vc2);
        } else if ( vc2.isSNP() ) {
            throw new IllegalArgumentException("vc1 is " + vc1 + " but vc2 is a SNP, which implies there's been some terrible bug in the cigar " + vc2);
        }

        final Allele ref, alt;
        final VariantContextBuilder b = new VariantContextBuilder(vc1);
        if ( vc1.isSNP() ) {
            // we have to repair the first base, so SNP case is special cased
            if ( vc1.getReference().equals(vc2.getReference()) ) {
                // we've got an insertion, so we just update the alt to have the prev alt
                ref = vc1.getReference();
                alt = Allele.create(vc1.getAlternateAllele(0).getDisplayString() + vc2.getAlternateAllele(0).getDisplayString().substring(1), false);
            } else {
                // we're dealing with a deletion, so we patch the ref
                ref = vc2.getReference();
                alt = vc1.getAlternateAllele(0);
                b.stop(vc2.getEnd());
            }
        } else {
            final VariantContext insertion = vc1.isSimpleInsertion() ? vc1 : vc2;
            final VariantContext deletion  = vc1.isSimpleInsertion() ? vc2 : vc1;
            ref = deletion.getReference();
            alt = insertion.getAlternateAllele(0);
            b.stop(deletion.getEnd());
        }

        return b.alleles(Arrays.asList(ref, alt)).make();
    }

    // TODO -- warning this is an O(N^3) algorithm because I'm just lazy.  If it's valuable we need to reengineer it
    /**
     * Repeatedly look for clumps of nearby events and replace each clump with a single
     * block substitution, restarting the scan after every successful merge (hence the
     * O(N^3) warning above).  A no-op when there are fewer than
     * MIN_NUMBER_OF_EVENTS_TO_COMBINE_INTO_BLOCK_SUBSTITUTION events.
     */
    @Requires("getNumberOfEvents() > 0")
    protected void replaceClumpedEventsWithBlockSubstititions() {
        if ( getNumberOfEvents() >= MIN_NUMBER_OF_EVENTS_TO_COMBINE_INTO_BLOCK_SUBSTITUTION) {
            int lastStart = -1; // don't revisit starts we've already considered in earlier passes
            for ( boolean foundOne = true; foundOne; ) {
                foundOne = false;
                for ( final VariantContext vc : getVariantContexts() ) {
                    if ( vc.getStart() > lastStart ) {
                        lastStart = vc.getStart();
                        final List<VariantContext> neighborhood = getNeighborhood(vc, 10);
                        if ( updateToBlockSubstitutionIfBetter(neighborhood) ) {
                            foundOne = true; // map was modified; break out and rescan
                            break;
                        }
                    }
                }
            }
        }
    }

    /**
     * Replace the given neighbors with a single block substitution covering their span,
     * if there are enough of them to be worth combining.
     *
     * @param neighbors a start-ordered list of adjacent events
     * @return true if the map was modified
     */
    protected boolean updateToBlockSubstitutionIfBetter(final List<VariantContext> neighbors) {
        if (neighbors.size() < MIN_NUMBER_OF_EVENTS_TO_COMBINE_INTO_BLOCK_SUBSTITUTION)
            return false;
        // TODO -- need more tests to decide if this is really so good

        final VariantContext first = neighbors.get(0);
        final int refStartOffset = first.getStart() - refLoc.getStart();
        final int refEndOffset = neighbors.get(neighbors.size() - 1).getEnd() - refLoc.getStart();

        final byte[] refBases = Arrays.copyOfRange(ref, refStartOffset, refEndOffset + 1);
        final byte[] hapBases = AlignmentUtils.getBasesCoveringRefInterval(refStartOffset, refEndOffset, haplotype.getBases(), haplotype.getAlignmentStartHapwrtRef(), haplotype.getCigar());

        final VariantContextBuilder builder = new VariantContextBuilder(first);
        builder.stop(first.getStart() + refBases.length - 1);
        builder.alleles(Arrays.asList(Allele.create(refBases, true), Allele.create(hapBases)));
        final VariantContext block = builder.make();

        // remove all merged events
        for ( final VariantContext merged : neighbors ) {
            if ( remove(merged.getStart()) == null )
                throw new IllegalArgumentException("Expected to remove variant context from the event map but remove said there wasn't any element there: " + merged);
        }

        // note must be after we remove the previous events as the treeset only allows one key per start
        logger.info("Transforming into block substitution at " + block);
        addVC(block, false);

        return true;
    }

    /**
     * Get all of the variant contexts starting at leftMost that are within maxBP of each other
     *
     * @param leftMost the left most (smallest position) variant context that will start the neighborhood
     * @param maxBPBetweenEvents the maximum distance in BP between the end of one event the start of the next
     *                           to be included in the resulting list
     * @return a list that contains at least one element (leftMost)
     */
    @Requires({"leftMost != null", "maxBPBetweenEvents >= 0"})
    @Ensures({"result != null", "! result.isEmpty()"})
    protected List<VariantContext> getNeighborhood(final VariantContext leftMost, final int maxBPBetweenEvents) {
        final List<VariantContext> neighbors = new LinkedList<VariantContext>();

        VariantContext left = leftMost;
        for ( final VariantContext vc : getVariantContexts() ) {
            if ( vc.getStart() < leftMost.getStart() )
                continue; // skip everything strictly before the anchor event

            if ( vc.getStart() - left.getEnd() < maxBPBetweenEvents ) {
                // this vc is within max distance to the end of the left event, so accumulate it
                neighbors.add(vc);
                left = vc;
            }
        }

        return neighbors;
    }

    /**
     * Get the starting positions of events in this event map
     * @return the (sorted) set of start positions
     */
    public Set<Integer> getStartPositions() {
        return keySet();
    }

    /**
     * Get the variant contexts in order of start position in this event map
     * @return the events, sorted by start position
     */
    public Collection<VariantContext> getVariantContexts() {
        return values();
    }

    /**
     * How many events do we have?
     * @return the number of events in this map
     */
    public int getNumberOfEvents() {
        return size();
    }

    @Override
    public String toString() {
        final StringBuilder b = new StringBuilder("EventMap{");
        for ( final VariantContext vc : getVariantContexts() )
            b.append(String.format("%s:%d-%d %s,", vc.getChr(), vc.getStart(), vc.getEnd(), vc.getAlleles()));
        b.append("}");
        return b.toString();
    }

    /**
     * Build event maps for each haplotype, returning the sorted set of all of the starting positions of all
     * events across all haplotypes
     *
     * @param haplotypes a list of haplotypes
     * @param ref the reference bases
     * @param refLoc the span of the reference bases
     * @param debug if true, we'll emit debugging information during this operation
     * @return a sorted set of start positions of all events among all haplotypes
     */
    public static TreeSet<Integer> buildEventMapsForHaplotypes( final List<Haplotype> haplotypes,
                                                                final byte[] ref,
                                                                final GenomeLoc refLoc,
                                                                final boolean debug) {
        // Using the cigar from each called haplotype figure out what events need to be written out in a VCF file
        final TreeSet<Integer> startPosKeySet = new TreeSet<Integer>();
        int hapNumber = 0;

        if( debug ) logger.info("=== Best Haplotypes ===");
        for( final Haplotype h : haplotypes ) {
            // Walk along the alignment and turn any difference from the reference into an event
            h.setEventMap( new EventMap( h, ref, refLoc, "HC" + hapNumber++ ) );
            startPosKeySet.addAll(h.getEventMap().getStartPositions());

            if( debug ) {
                logger.info(h.toString());
                logger.info("> Cigar = " + h.getCigar());
                logger.info(">> Events = " + h.getEventMap());
            }
        }

        return startPosKeySet;
    }

    // orders VariantContexts by their start position only; ties are considered equal
    private static class VariantContextComparator implements Comparator<VariantContext> {
        @Override
        public int compare(VariantContext vc1, VariantContext vc2) {
            return vc1.getStart() - vc2.getStart();
        }
    }

    /**
     * Get all of the VariantContexts in the event maps for all haplotypes, sorted by their start position
     *
     * Note: because the comparator above considers contexts with equal starts as duplicates,
     * the returned TreeSet keeps only one event per start position.
     *
     * @param haplotypes the set of haplotypes to grab the VCs from
     * @return a sorted set of variant contexts
     */
    public static TreeSet<VariantContext> getAllVariantContexts( final List<Haplotype> haplotypes ) {
        // Using the cigar from each called haplotype figure out what events need to be written out in a VCF file
        final TreeSet<VariantContext> vcs = new TreeSet<VariantContext>(new VariantContextComparator());

        for( final Haplotype h : haplotypes ) {
            vcs.addAll(h.getEventMap().getVariantContexts());
        }

        return vcs;
    }
}

View File

@ -23,7 +23,7 @@
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils;
package org.broadinstitute.sting.utils.haplotype;
import com.google.java.contract.Requires;
import net.sf.samtools.Cigar;
@ -31,18 +31,19 @@ import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import org.apache.commons.lang.ArrayUtils;
import org.broadinstitute.sting.gatk.contexts.ReferenceContext;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.exceptions.ReviewedStingException;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;
import org.broadinstitute.sting.utils.sam.ReadUtils;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import java.io.Serializable;
import java.util.*;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
public class Haplotype extends Allele {
private GenomeLoc genomeLocation = null;
private Map<Integer, VariantContext> eventMap = null;
private EventMap eventMap = null;
private Cigar cigar;
private int alignmentStartHapwrtRef;
private Event artificialEvent = null;
@ -51,17 +52,38 @@ public class Haplotype extends Allele {
/**
* Main constructor
*
* @param bases bases
* @param isRef is reference allele?
* @param bases a non-null array of bases
* @param isRef is this the reference haplotype?
*/
public Haplotype( final byte[] bases, final boolean isRef ) {
super(bases.clone(), isRef);
}
/**
* Create a new non-ref haplotype
*
* @param bases a non-null array of bases
*/
public Haplotype( final byte[] bases ) {
this(bases, false);
}
/**
* Create a new haplotype with bases
*
* Requires bases.length == cigar.getReadLength()
*
* @param bases a non-null array of bases
* @param isRef is this the reference haplotype?
* @param alignmentStartHapwrtRef offset of this haplotype w.r.t. the reference
* @param cigar the cigar that maps this haplotype to the reference sequence
*/
public Haplotype( final byte[] bases, final boolean isRef, final int alignmentStartHapwrtRef, final Cigar cigar) {
this(bases, isRef);
this.alignmentStartHapwrtRef = alignmentStartHapwrtRef;
setCigar(cigar);
}
/**
* Copy constructor. Note the ref state of the provided allele is ignored!
*
@ -81,6 +103,40 @@ public class Haplotype extends Allele {
this.genomeLocation = loc;
}
/**
 * Create a new Haplotype derived from this one that exactly spans the provided location
 *
 * Note that this haplotype must contain a GenomeLoc for this operation to be successful.
 * loc must also be fully contained within this Haplotype's genomeLoc, and this haplotype
 * must have a cigar.
 *
 * @param loc a location completely contained within this Haplotype's location
 * @return a new Haplotype containing only the bases spanning the provided location, or null
 *         if the trimmed haplotype would be malformed (its bases cannot be resolved against
 *         the reference interval, or its cigar would start/end with an indel)
 * @throws IllegalStateException if this haplotype has no GenomeLoc
 * @throws IllegalArgumentException if loc is null, not contained in genomeLocation, or this haplotype has no cigar
 */
public Haplotype trim(final GenomeLoc loc) {
    if ( loc == null ) throw new IllegalArgumentException("Loc cannot be null");
    if ( genomeLocation == null ) throw new IllegalStateException("Cannot trim a Haplotype without containing GenomeLoc");
    if ( ! genomeLocation.containsP(loc) ) throw new IllegalArgumentException("Can only trim a Haplotype to a containing span. My loc is " + genomeLocation + " but wanted trim to " + loc);
    if ( getCigar() == null ) throw new IllegalArgumentException("Cannot trim haplotype without a cigar " + this);

    // offsets of the requested span relative to this haplotype's own location
    final int newStart = loc.getStart() - this.genomeLocation.getStart();
    final int newStop = newStart + loc.size() - 1;
    final byte[] newBases = AlignmentUtils.getBasesCoveringRefInterval(newStart, newStop, getBases(), 0, getCigar());
    final Cigar newCigar = AlignmentUtils.trimCigarByReference(getCigar(), newStart, newStop);

    if ( newBases == null || AlignmentUtils.startsOrEndsWithInsertionOrDeletion(newCigar) )
        // we cannot meaningfully chop down the haplotype, so return null
        return null;

    final Haplotype ret = new Haplotype(newBases, isReference());
    ret.setCigar(newCigar);
    ret.setGenomeLocation(loc);
    // NOTE(review): newStart is an offset in this haplotype's reference frame; verify that adding
    // it to the old alignment start yields the intended alignment offset for the trimmed haplotype
    ret.setAlignmentStartHapwrtRef(newStart + getAlignmentStartHapwrtRef());
    return ret;
}
@Override
public boolean equals( Object h ) {
return h instanceof Haplotype && Arrays.equals(getBases(), ((Haplotype) h).getBases());
@ -91,11 +147,11 @@ public class Haplotype extends Allele {
return Arrays.hashCode(getBases());
}
public Map<Integer, VariantContext> getEventMap() {
public EventMap getEventMap() {
return eventMap;
}
public void setEventMap( final Map<Integer, VariantContext> eventMap ) {
public void setEventMap( final EventMap eventMap ) {
this.eventMap = eventMap;
}
@ -104,6 +160,18 @@ public class Haplotype extends Allele {
return getDisplayString();
}
/**
* Get the span of this haplotype (may be null)
* @return a potentially null genome loc
*/
public GenomeLoc getGenomeLocation() {
return genomeLocation;
}
public void setGenomeLocation(GenomeLoc genomeLocation) {
this.genomeLocation = genomeLocation;
}
public long getStartPosition() {
return genomeLocation.getStart();
}
@ -120,6 +188,11 @@ public class Haplotype extends Allele {
this.alignmentStartHapwrtRef = alignmentStartHapwrtRef;
}
/**
 * Get the cigar for this haplotype. Note that the cigar is guaranteed to be consolidated,
 * in that multiple adjacent equal operators will have been merged (see setCigar)
 * @return the cigar of this haplotype
 */
public Cigar getCigar() {
    return cigar;
}
@ -137,8 +210,17 @@ public class Haplotype extends Allele {
return AlignmentUtils.consolidateCigar(extendedHaplotypeCigar);
}
/**
* Set the cigar of this haplotype to cigar.
*
* Note that this function consolidates the cigar, so that 1M1M1I1M1M => 2M1I2M
*
* @param cigar a cigar whose readLength == length()
*/
public void setCigar( final Cigar cigar ) {
this.cigar = cigar;
this.cigar = AlignmentUtils.consolidateCigar(cigar);
if ( this.cigar.getReadLength() != length() )
throw new IllegalArgumentException("Read length " + length() + " not equal to the read length of the cigar " + cigar.getReadLength());
}
public boolean isArtificialHaplotype() {
@ -181,25 +263,6 @@ public class Haplotype extends Allele {
return new Haplotype(newHaplotypeBases, new Event(refAllele, altAllele, genomicInsertLocation));
}
public static class HaplotypeBaseComparator implements Comparator<Haplotype>, Serializable {
@Override
public int compare( final Haplotype hap1, final Haplotype hap2 ) {
return compareHaplotypeBases(hap1, hap2);
}
public static int compareHaplotypeBases(final Haplotype hap1, final Haplotype hap2) {
final byte[] arr1 = hap1.getBases();
final byte[] arr2 = hap2.getBases();
// compares byte arrays using lexical ordering
final int len = Math.min(arr1.length, arr2.length);
for( int iii = 0; iii < len; iii++ ) {
final int cmp = arr1[iii] - arr2[iii];
if (cmp != 0) { return cmp; }
}
return arr2.length - arr1.length;
}
}
public static LinkedHashMap<Allele,Haplotype> makeHaplotypeListFromAlleles(final List<Allele> alleleList,
final int startPos,
final ReferenceContext ref,
@ -278,15 +341,4 @@ public class Haplotype extends Allele {
public void setScore(double score) {
this.score = this.isReference() ? Double.MAX_VALUE : score;
}
/**
* A comparator that sorts haplotypes in decreasing order of score, so that the best supported
* haplotypes are at the top
*/
public static class ScoreComparator implements Comparator<Haplotype> {
@Override
public int compare(Haplotype o1, Haplotype o2) {
return -1 * Double.valueOf(o1.getScore()).compareTo(o2.getScore());
}
}
}

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2012 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils.haplotype;
import java.util.Comparator;
/**
 * Orders haplotypes lexicographically by their base sequences.
 *
 * User: depristo
 * Date: 3/29/13
 * Time: 11:09 AM
 */
public class HaplotypeBaseComparator implements Comparator<Haplotype> {
    @Override
    public int compare( final Haplotype hap1, final Haplotype hap2 ) {
        // delegate to String's lexicographic ordering over the base strings
        final String bases1 = hap1.getBaseString();
        final String bases2 = hap2.getBaseString();
        return bases1.compareTo(bases2);
    }
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2012 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils.haplotype;
import java.util.Comparator;
/**
 * A comparator that sorts haplotypes in decreasing order of score, so that the best supported
 * haplotypes are at the top
 */
public class HaplotypeScoreComparator implements Comparator<Haplotype> {
    @Override
    public int compare(Haplotype o1, Haplotype o2) {
        // Descending order: compare o2's score against o1's.  Double.compare avoids the
        // boxing of Double.valueOf(..).compareTo(..) and the fragile "-1 *" negation idiom,
        // and handles NaN / -0.0 consistently.
        return Double.compare(o2.getScore(), o1.getScore());
    }
}

View File

@ -26,11 +26,8 @@
package org.broadinstitute.sting.utils.haplotypeBAMWriter;
import net.sf.samtools.*;
import org.broadinstitute.sting.gatk.io.StingSAMFileWriter;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;

View File

@ -27,7 +27,7 @@ package org.broadinstitute.sting.utils.haplotypeBAMWriter;
import net.sf.samtools.SAMFileWriter;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.genotyper.MostLikelyAllele;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;
import org.broadinstitute.sting.utils.sam.GATKSAMRecord;

View File

@ -28,7 +28,7 @@ package org.broadinstitute.sting.utils.haplotypeBAMWriter;
import net.sf.samtools.*;
import org.broadinstitute.sting.gatk.io.StingSAMFileWriter;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.genotyper.PerReadAlleleLikelihoodMap;

View File

@ -48,6 +48,95 @@ public final class AlignmentUtils {
// cannot be instantiated
private AlignmentUtils() { }
/**
 * Does cigar start or end with an insertion or deletion operation?
 *
 * @param cigar a non-null cigar to test
 * @return true if the first or last operator of cigar is an I or a D
 * @throws IllegalArgumentException if cigar is null
 */
public static boolean startsOrEndsWithInsertionOrDeletion(final Cigar cigar) {
    if ( cigar == null ) throw new IllegalArgumentException("Cigar cannot be null");

    if ( cigar.isEmpty() )
        return false;

    final CigarOperator first = cigar.getCigarElement(0).getOperator();
    final CigarOperator last = cigar.getCigarElement(cigar.numCigarElements()-1).getOperator();
    return first == CigarOperator.D || first == CigarOperator.I || last == CigarOperator.D || last == CigarOperator.I;
}
/**
 * Get the byte[] from bases that cover the reference interval refStart -> refEnd given the
 * alignment of bases to the reference (basesToRefCigar) and the start offset of the bases on the reference
 *
 * refStart and refEnd are 0 based offsets that we want to obtain.  In the client code, if the reference
 * bases start at position X and you want Y -> Z, refStart should be Y - X and refEnd should be Z - X.
 *
 * If refStart or refEnd would start or end the new bases within a deletion, this function will return null
 *
 * @param refStart 0-based inclusive reference offset where the returned bases should begin
 * @param refEnd 0-based inclusive reference offset where the returned bases should end
 * @param bases the read/haplotype bases aligned to the reference; must match basesToRefCigar's read length
 * @param basesStartOnRef where does the bases array start w.r.t. the reference start?  For example, bases[0] of
 *                        could be at refStart == 0 if basesStartOnRef == 0, but it could just as easily be at
 *                        10 (meaning bases doesn't fully span the reference), which would be indicated by basesStartOnRef == 10.
 *                        It's not trivial to eliminate this parameter because it's tied up with the cigar
 * @param basesToRefCigar the cigar that maps the bases to the reference genome
 * @return a byte[] containing the bases covering this interval, or null if we would start or end within a deletion
 * @throws IllegalArgumentException on bad offsets, null arguments, or a bases/cigar length mismatch
 * @throws IllegalStateException if the cigar never reaches refStart or refEnd (interval outside the alignment)
 */
public static byte[] getBasesCoveringRefInterval(final int refStart, final int refEnd, final byte[] bases, final int basesStartOnRef, final Cigar basesToRefCigar) {
    if ( refStart < 0 || refEnd < refStart ) throw new IllegalArgumentException("Bad start " + refStart + " and/or stop " + refEnd);
    if ( basesStartOnRef < 0 ) throw new IllegalArgumentException("BasesStartOnRef must be >= 0 but got " + basesStartOnRef);
    if ( bases == null ) throw new IllegalArgumentException("Bases cannot be null");
    if ( basesToRefCigar == null ) throw new IllegalArgumentException("basesToRefCigar cannot be null");
    if ( bases.length != basesToRefCigar.getReadLength() ) throw new IllegalArgumentException("Mismatch in length between reference bases " + bases.length + " and cigar length " + basesToRefCigar);

    int refPos = basesStartOnRef;   // current position on the reference
    int basesPos = 0;               // current position in the bases array
    int basesStart = -1;            // bases index corresponding to refStart (found during the walk)
    int basesStop = -1;             // bases index corresponding to refEnd (found during the walk)
    boolean done = false;

    for ( int iii = 0; ! done && iii < basesToRefCigar.numCigarElements(); iii++ ) {
        final CigarElement ce = basesToRefCigar.getCigarElement(iii);
        switch ( ce.getOperator() ) {
            case I:
                // insertions consume bases but no reference
                basesPos += ce.getLength();
                break;
            case M: case X: case EQ:
                for ( int i = 0; i < ce.getLength(); i++ ) {
                    if ( refPos == refStart )
                        basesStart = basesPos;
                    if ( refPos == refEnd ) {
                        basesStop = basesPos;
                        done = true;
                        break;
                    }
                    refPos++;
                    basesPos++;
                }
                break;
            case D:
                for ( int i = 0; i < ce.getLength(); i++ ) {
                    if ( refPos == refEnd || refPos == refStart ) {
                        // if we ever reach a ref position that is either a start or an end, we fail
                        return null;
                    }
                    refPos++;
                }
                break;
            default:
                throw new IllegalStateException("Unsupported operator " + ce);
        }
    }

    if ( basesStart == -1 || basesStop == -1 )
        throw new IllegalStateException("Never found start " + basesStart + " or stop " + basesStop + " given cigar " + basesToRefCigar);

    return Arrays.copyOfRange(bases, basesStart, basesStop + 1);
}
/**
* Get the number of bases at which refSeq and readSeq differ, given their alignment
*

View File

@ -345,24 +345,50 @@ public class GATKSAMRecord extends BAMRecord {
// *** ReduceReads functions ***//
///////////////////////////////////////////////////////////////////////////////
/**
 * Get the counts of the bases in this reduced read
 *
 * NOTE that this is not the raw value of the REDUCED_READ_CONSENSUS_TAG, which
 * is encoded in a special way (see decodeReadReadCounts). This is the actual
 * positive depth at each base. So for a RR with a tag of:
 *
 * [10, 5, -1, -5]
 *
 * this function returns
 *
 * [10, 15, 9, 5]
 *
 * as one might expect. The decoded array is computed once and cached.
 *
 * @return a byte[] holding the depth of the bases in this reduced read, or null if this isn't a reduced read
 */
public byte[] getReducedReadCounts() {
if ( ! retrievedReduceReadCounts ) {
// fetch the raw tag exactly once; non-reduced reads have no tag, so the cache stays null
final byte[] tag = getByteArrayAttribute(REDUCED_READ_CONSENSUS_TAG);
if ( tag != null ) reducedReadCounts = decodeReadReadCounts(tag);
retrievedReduceReadCounts = true;
}
return reducedReadCounts;
}
/**
 * Is this read a reduced read?
 *
 * A record counts as reduced exactly when it carries a decodable
 * reduced-read consensus tag, i.e. when getReducedReadCounts() is non-null.
 *
 * @return true if this record is a reduced read, false otherwise
 */
public boolean isReducedRead() {
final byte[] counts = getReducedReadCounts();
return counts != null;
}
/**
* Set the reduced read counts for this record to counts
* Set the reduced read counts tag for this record to counts
*
* WARNING -- this function assumes that counts is encoded as a difference in value count
* of count[i] - count[0]. It is not a straight counting of the bases in the read.
*
* @param counts the count array
*/
public void setReducedReadCounts(final byte[] counts) {
public void setReducedReadCountsTag(final byte[] counts) {
retrievedReduceReadCounts = false;
setAttribute(REDUCED_READ_CONSENSUS_TAG, counts);
}
@ -374,9 +400,32 @@ public class GATKSAMRecord extends BAMRecord {
* @return the number of bases corresponding to the i'th base of the reduced read
*/
public final byte getReducedCount(final int i) {
byte firstCount = getReducedReadCounts()[0];
byte offsetCount = getReducedReadCounts()[i];
return (i==0) ? firstCount : (byte) Math.min(firstCount + offsetCount, Byte.MAX_VALUE);
return getReducedReadCounts()[i];
}
/**
 * Decode the consensus tag of a reduced read into the true per-base depths,
 * returning a newly allocated byte[].
 *
 * The tag stores the first count verbatim and every later entry as an offset
 * relative to that first count. For a tag of [10, 5, -1, -5] the decoded
 * depths are [10, 15, 9, 5]. Decoded values are capped at Byte.MAX_VALUE to
 * avoid byte overflow.
 *
 * @param countsFromTag a non-null byte[] containing the tag-encoded reduced read counts
 * @return a non-null byte[] containing the true depth values for the vector
 */
private byte[] decodeReadReadCounts(final byte[] countsFromTag) {
final byte[] depths = new byte[countsFromTag.length];
final byte baseline = countsFromTag[0];
depths[0] = baseline;
for ( int pos = 1; pos < depths.length; pos++ )
depths[pos] = (byte) Math.min(baseline + countsFromTag[pos], Byte.MAX_VALUE);
return depths;
}
///////////////////////////////////////////////////////////////////////////////

View File

@ -1424,4 +1424,21 @@ public class GATKVariantContextUtils {
return result;
}
/**
 * Are vc1 and vc2 equal sites, including their position and alleles?
 *
 * @param vc1 non-null VariantContext
 * @param vc2 non-null VariantContext
 * @return true if vc1 and vc2 share chromosome, start, end, and alleles
 * @throws IllegalArgumentException if either argument is null
 */
public static boolean equalSites(final VariantContext vc1, final VariantContext vc2) {
if ( vc1 == null ) throw new IllegalArgumentException("vc1 cannot be null");
if ( vc2 == null ) throw new IllegalArgumentException("vc2 cannot be null");
// short-circuit on the cheap integer comparisons before the object comparisons
return vc1.getStart() == vc2.getStart()
&& vc1.getEnd() == vc2.getEnd()
&& vc1.getChr().equals(vc2.getChr())
&& vc1.getAlleles().equals(vc2.getAlleles());
}
}

View File

@ -49,7 +49,7 @@ import java.util.*;
public class ActiveRegionUnitTest extends BaseTest {
// debug toggle for local troubleshooting; must stay false so all tests run
private final static boolean DEBUG = false;
private GenomeLocParser genomeLocParser;
private IndexedFastaSequenceFile seq;
private String contig;
@ -309,4 +309,75 @@ public class ActiveRegionUnitTest extends BaseTest {
}
}
}
// -----------------------------------------------------------------------------------------------
//
// Make sure we can properly cut up an active region based on engine intervals
//
// -----------------------------------------------------------------------------------------------
/**
 * Fixtures for trimming an ActiveRegion down to a desired span.
 *
 * Each row is { original region loc, original extension, desired span,
 * expected trimmed loc, expected trimmed extension }. Based on the cases
 * below, the expected extension appears to cover the overshoot of the desired
 * span beyond the region, capped at the original extension -- TODO confirm
 * against ActiveRegion.trim().
 */
@DataProvider(name = "TrimActiveRegionData")
public Object[][] makeTrimActiveRegionData() {
List<Object[]> tests = new ArrayList<Object[]>();
// fully enclosed within active region
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 15, 16),
genomeLocParser.createGenomeLoc("20", 15, 16), 0});
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 10, 15),
genomeLocParser.createGenomeLoc("20", 10, 15), 0});
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 15, 20),
genomeLocParser.createGenomeLoc("20", 15, 20), 0});
// needs extra padding on the right
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 15, 25),
genomeLocParser.createGenomeLoc("20", 15, 20), 5});
// needs extra padding on the left
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 5, 15),
genomeLocParser.createGenomeLoc("20", 10, 15), 5});
// needs extra padding on both
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 7, 21),
genomeLocParser.createGenomeLoc("20", 10, 20), 3});
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 9, 23),
genomeLocParser.createGenomeLoc("20", 10, 20), 3});
// desired span captures everything, so we're returning everything. Tests that extension is set correctly
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 10, 20), 10,
genomeLocParser.createGenomeLoc("20", 1, 50),
genomeLocParser.createGenomeLoc("20", 10, 20), 10});
// At the start of the chromosome, potentially a bit weird
tests.add(new Object[]{
genomeLocParser.createGenomeLoc("20", 1, 10), 10,
genomeLocParser.createGenomeLoc("20", 1, 50),
genomeLocParser.createGenomeLoc("20", 1, 10), 10});
return tests.toArray(new Object[][]{});
}
/**
 * Trimming an active region to a desired span must produce both the expected
 * trimmed location and the expected extension.
 *
 * @param regionLoc the location of the original active region
 * @param extension the extension of the original active region
 * @param desiredSpan the span to trim the region down to
 * @param expectedActiveRegion the location the trimmed region should have
 * @param expectedExtension the extension the trimmed region should have
 */
@Test(dataProvider = "TrimActiveRegionData")
public void testTrimActiveRegion(final GenomeLoc regionLoc, final int extension, final GenomeLoc desiredSpan, final GenomeLoc expectedActiveRegion, final int expectedExtension) {
final ActiveRegion region = new ActiveRegion(regionLoc, Collections.<ActivityProfileState>emptyList(), true, genomeLocParser, extension);
final ActiveRegion trimmed = region.trim(desiredSpan);
Assert.assertEquals(trimmed.getLocation(), expectedActiveRegion, "Incorrect region");
// fixed copy-paste bug: this message previously said "Incorrect region" while checking the extension
Assert.assertEquals(trimmed.getExtension(), expectedExtension, "Incorrect extension");
}
}

View File

@ -0,0 +1,203 @@
/*
* Copyright (c) 2012 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils.haplotype;
import net.sf.samtools.TextCigarCodec;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.UnvalidatingGenomeLoc;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.variant.GATKVariantContextUtils;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.*;
/**
 * Unit tests for EventMap: building maps of variant events from haplotypes,
 * querying event neighborhoods, and merging clumped events into block
 * substitutions.
 */
public class EventMapUnitTest extends BaseTest {
// contig and source name shared by all generated VariantContexts
private final static String CHR = "20";
private final static String NAME = "foo";
// Each row: { EventMap to query, list of VCs expected to form the clumped neighborhood }
@DataProvider(name = "MyDataProvider")
public Object[][] makeMyDataProvider() {
List<Object[]> tests = new ArrayList<Object[]>();
final List<String> SNP_ALLELES = Arrays.asList("A", "C");
final List<String> INS_ALLELES = Arrays.asList("A", "ACGTGA");
final List<String> DEL_ALLELES = Arrays.asList("ACGTA", "C");
final List<List<String>> allAlleles = Arrays.asList(SNP_ALLELES, INS_ALLELES, DEL_ALLELES);
// sweep optional isolated SNPs to the left / middle / right of the clump (-1 = absent)
for ( final int leftNotClump : Arrays.asList(-1, 3) ) {
for ( final int middleNotClump : Arrays.asList(-1, 10, 500) ) {
for ( final int rightNotClump : Arrays.asList(-1, 1000) ) {
// clumps of 3 or 4 events over all permutations of SNP/INS/DEL alleles
for ( final int nClumped : Arrays.asList(3, 4) ) {
for ( final List<List<String>> alleles : Utils.makePermutations(allAlleles, nClumped, true)) {
final List<VariantContext> allVCS = new LinkedList<VariantContext>();
if ( leftNotClump != -1 ) allVCS.add(GATKVariantContextUtils.makeFromAlleles(NAME, CHR, leftNotClump, SNP_ALLELES));
if ( middleNotClump != -1 ) allVCS.add(GATKVariantContextUtils.makeFromAlleles(NAME, CHR, middleNotClump, SNP_ALLELES));
if ( rightNotClump != -1 ) allVCS.add(GATKVariantContextUtils.makeFromAlleles(NAME, CHR, rightNotClump, SNP_ALLELES));
// clumped events start at 50; each next event starts 3 bp past the previous end (2 bp gap)
int clumpStart = 50;
final List<VariantContext> vcs = new LinkedList<VariantContext>();
for ( final List<String> myAlleles : alleles ) {
final VariantContext vc = GATKVariantContextUtils.makeFromAlleles(NAME, CHR, clumpStart, myAlleles);
clumpStart = vc.getEnd() + 3;
vcs.add(vc);
}
// case 1: isolated events only -> no neighborhood expected
tests.add(new Object[]{new EventMap(new LinkedList<VariantContext>(allVCS)), Collections.emptyList()});
// case 2: isolated events plus the clump -> the clump is the expected neighborhood
allVCS.addAll(vcs);
tests.add(new Object[]{new EventMap(allVCS), vcs});
}
}
}
}
}
return tests.toArray(new Object[][]{});
}
/**
 * getNeighborhood must return the whole clump when queried from its leftmost
 * event, and just the singleton list for events outside the clump.
 */
@Test(dataProvider = "MyDataProvider", enabled = true)
public void testGetNeighborhood(final EventMap eventMap, final List<VariantContext> expectedNeighbors) {
final VariantContext leftOfNeighors = expectedNeighbors.isEmpty() ? null : expectedNeighbors.get(0);
for ( final VariantContext vc : eventMap.getVariantContexts() ) {
final List<VariantContext> n = eventMap.getNeighborhood(vc, 5);
if ( leftOfNeighors == vc )
Assert.assertEquals(n, expectedNeighbors);
else if ( ! expectedNeighbors.contains(vc) )
Assert.assertEquals(n, Collections.singletonList(vc), "Should only contain the original vc but " + n);
}
}
// Each row: { ref bases, haplotype bases, haplotype cigar, the single expected block-substitution VC }
@DataProvider(name = "BlockSubstitutionsData")
public Object[][] makeBlockSubstitutionsData() {
List<Object[]> tests = new ArrayList<Object[]>();
// runs of adjacent SNPs of increasing size under all-M cigars
for ( int size = EventMap.MIN_NUMBER_OF_EVENTS_TO_COMBINE_INTO_BLOCK_SUBSTITUTION; size < 10; size++ ) {
final String ref = Utils.dupString("A", size);
final String alt = Utils.dupString("C", size);
tests.add(new Object[]{ref, alt, size + "M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1, Arrays.asList(ref, alt))});
}
tests.add(new Object[]{"AAAAAA", "GAGAGA", "6M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1, Arrays.asList("AAAAA", "GAGAG"))});
tests.add(new Object[]{"AAAAAA", "GAGAGG", "6M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1, Arrays.asList("AAAAAA", "GAGAGG"))});
// SNPs clumped with indels, padded by 'len' matching bases on the left or right
for ( int len = 0; len < 10; len++ ) {
final String s = len == 0 ? "" : Utils.dupString("A", len);
tests.add(new Object[]{s + "AACCCCAA", s + "GAAG", len + 2 + "M4D2M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1 + len, Arrays.asList("AACCCCAA", "GAAG"))});
tests.add(new Object[]{s + "AAAA", s + "GACCCCAG", len + 2 + "M4I2M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1 + len, Arrays.asList("AAAA", "GACCCCAG"))});
tests.add(new Object[]{"AACCCCAA" + s, "GAAG" + s, "2M4D" + (len + 2) + "M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1, Arrays.asList("AACCCCAA", "GAAG"))});
tests.add(new Object[]{"AAAA" + s, "GACCCCAG" + s, "2M4I" + (len + 2) + "M", GATKVariantContextUtils.makeFromAlleles(NAME, CHR, 1, Arrays.asList("AAAA", "GACCCCAG"))});
}
return tests.toArray(new Object[][]{});
}
/**
 * Clumped events on a haplotype must collapse into exactly one block
 * substitution matching the expected site and alleles.
 */
@Test(dataProvider = "BlockSubstitutionsData")
public void testBlockSubstitutionsData(final String refBases, final String haplotypeBases, final String cigar, final VariantContext expectedBlock) {
final Haplotype hap = new Haplotype(haplotypeBases.getBytes(), false, 0, TextCigarCodec.getSingleton().decode(cigar));
final GenomeLoc loc = new UnvalidatingGenomeLoc(CHR, 0, 1, refBases.length());
final EventMap ee = new EventMap(hap, refBases.getBytes(), loc, NAME);
ee.replaceClumpedEventsWithBlockSubstititions();
Assert.assertEquals(ee.getNumberOfEvents(), 1);
final VariantContext actual = ee.getVariantContexts().iterator().next();
Assert.assertTrue(GATKVariantContextUtils.equalSites(actual, expectedBlock), "Failed with " + actual);
}
// Each row: { ref bases, haplotype bases, cigar, expected (ref, alt) allele pairs after merging }
@DataProvider(name = "AdjacentSNPIndelTest")
public Object[][] makeAdjacentSNPIndelTest() {
List<Object[]> tests = new ArrayList<Object[]>();
tests.add(new Object[]{"TT", "GCT", "1M1I1M", Arrays.asList(Arrays.asList("T", "GC"))});
tests.add(new Object[]{"GCT", "TT", "1M1D1M", Arrays.asList(Arrays.asList("GC", "T"))});
tests.add(new Object[]{"TT", "GCCT", "1M2I1M", Arrays.asList(Arrays.asList("T", "GCC"))});
tests.add(new Object[]{"GCCT", "TT", "1M2D1M", Arrays.asList(Arrays.asList("GCC", "T"))});
tests.add(new Object[]{"AAGCCT", "AATT", "3M2D1M", Arrays.asList(Arrays.asList("GCC", "T"))});
tests.add(new Object[]{"AAGCCT", "GATT", "3M2D1M", Arrays.asList(Arrays.asList("A", "G"), Arrays.asList("GCC", "T"))});
tests.add(new Object[]{"AAAAA", "AGACA", "5M", Arrays.asList(Arrays.asList("A", "G"), Arrays.asList("A", "C"))});
return tests.toArray(new Object[][]{});
}
/**
 * SNPs adjacent to indels must merge into combined events whose ref/alt
 * alleles match the expected pairs, in order.
 */
@Test(dataProvider = "AdjacentSNPIndelTest")
public void testAdjacentSNPIndelTest(final String refBases, final String haplotypeBases, final String cigar, final List<List<String>> expectedAlleles) {
final Haplotype hap = new Haplotype(haplotypeBases.getBytes(), false, 0, TextCigarCodec.getSingleton().decode(cigar));
final GenomeLoc loc = new UnvalidatingGenomeLoc(CHR, 0, 1, refBases.length());
final EventMap ee = new EventMap(hap, refBases.getBytes(), loc, NAME);
ee.replaceClumpedEventsWithBlockSubstititions();
Assert.assertEquals(ee.getNumberOfEvents(), expectedAlleles.size());
final List<VariantContext> actuals = new ArrayList<VariantContext>(ee.getVariantContexts());
for ( int i = 0; i < ee.getNumberOfEvents(); i++ ) {
final VariantContext actual = actuals.get(i);
Assert.assertEquals(actual.getReference().getDisplayString(), expectedAlleles.get(i).get(0));
Assert.assertEquals(actual.getAlternateAllele(0).getDisplayString(), expectedAlleles.get(i).get(1));
}
}
// Each row: { alleles of first VC, alleles of second VC, expected alleles of the merged block }
@DataProvider(name = "MakeBlockData")
public Object[][] makeMakeBlockData() {
List<Object[]> tests = new ArrayList<Object[]>();
tests.add(new Object[]{Arrays.asList("A", "G"), Arrays.asList("AGT", "A"), Arrays.asList("AGT", "G")});
tests.add(new Object[]{Arrays.asList("A", "G"), Arrays.asList("A", "AGT"), Arrays.asList("A", "GGT")});
tests.add(new Object[]{Arrays.asList("AC", "A"), Arrays.asList("A", "AGT"), Arrays.asList("AC", "AGT")});
tests.add(new Object[]{Arrays.asList("ACGTA", "A"), Arrays.asList("A", "AG"), Arrays.asList("ACGTA", "AG")});
tests.add(new Object[]{Arrays.asList("AC", "A"), Arrays.asList("A", "AGCGT"), Arrays.asList("AC", "AGCGT")});
tests.add(new Object[]{Arrays.asList("A", "ACGTA"), Arrays.asList("AG", "A"), Arrays.asList("AG", "ACGTA")});
tests.add(new Object[]{Arrays.asList("A", "AC"), Arrays.asList("AGCGT", "A"), Arrays.asList("AGCGT", "AC")});
return tests.toArray(new Object[][]{});
}
/**
 * makeBlock must merge two events at the same start into a single block with
 * the expected alleles.
 *
 * NOTE(review): the method name testGetNeighborhood looks like a copy-paste
 * leftover -- this overload exercises EventMap.makeBlock, not getNeighborhood.
 */
@Test(dataProvider = "MakeBlockData", enabled = true)
public void testGetNeighborhood(final List<String> firstAlleles, final List<String> secondAlleles, final List<String> expectedAlleles) {
final VariantContext vc1 = GATKVariantContextUtils.makeFromAlleles("x", "20", 10, firstAlleles);
final VariantContext vc2 = GATKVariantContextUtils.makeFromAlleles("x", "20", 10, secondAlleles);
final VariantContext expected = GATKVariantContextUtils.makeFromAlleles("x", "20", 10, expectedAlleles);
final EventMap eventMap = new EventMap(Collections.<VariantContext>emptyList());
final VariantContext block = eventMap.makeBlock(vc1, vc2);
Assert.assertEquals(block.getStart(), expected.getStart());
Assert.assertEquals(block.getAlleles(), expected.getAlleles());
}
}

View File

@ -23,20 +23,23 @@
* THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.utils;
package org.broadinstitute.sting.utils.haplotype;
import net.sf.picard.util.CigarUtil;
import net.sf.samtools.Cigar;
import net.sf.samtools.CigarElement;
import net.sf.samtools.CigarOperator;
import net.sf.samtools.TextCigarCodec;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.GenomeLoc;
import org.broadinstitute.sting.utils.UnvalidatingGenomeLoc;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.variant.variantcontext.Allele;
import org.broadinstitute.variant.variantcontext.VariantContext;
import org.broadinstitute.variant.variantcontext.VariantContextBuilder;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.*;
@ -45,10 +48,6 @@ import java.util.*;
* Basic unit test for Haplotype Class
*/
public class HaplotypeUnitTest extends BaseTest {
@BeforeClass
public void init() {
// no per-class setup required; kept as a TestNG lifecycle placeholder
}
@Test
public void testSimpleInsertionAllele() {
final String bases = "ACTGGTCAACTGGTCAACTGGTCAACTGGTCA";
@ -183,4 +182,68 @@ public class HaplotypeUnitTest extends BaseTest {
Assert.assertEquals(makeHCForCigar("AGCT", "1M1I1I1I").getConsolidatedPaddedCigar(1).toString(), "1M3I1M");
Assert.assertEquals(makeHCForCigar("AGCT", "1M1I1I1I").getConsolidatedPaddedCigar(2).toString(), "1M3I2M");
}
/**
 * Fixtures for Haplotype.trim().
 *
 * Each row is { full haplotype, loc to trim to, expected trimmed haplotype },
 * where a null expectation means the trim is impossible (the trim boundary
 * falls inside a deletion of the haplotype's cigar).
 */
@DataProvider(name = "TrimmingData")
public Object[][] makeTrimmingData() {
List<Object[]> tests = new ArrayList<Object[]>();
// exhaustively trim an all-match haplotype to every sub-interval of its location
final GenomeLoc loc = new UnvalidatingGenomeLoc("20", 0, 10, 20);
final String fullBases = "ACGTAACCGGT";
for ( int trimStart = loc.getStart(); trimStart < loc.getStop(); trimStart++ ) {
for ( int trimStop = trimStart; trimStop <= loc.getStop(); trimStop++ ) {
// 0-based offsets of the trimmed interval within the haplotype's bases
final int start = trimStart - loc.getStart();
final int stop = start + (trimStop - trimStart) + 1;
final GenomeLoc trimmedLoc = new UnvalidatingGenomeLoc("20", 0, start + loc.getStart(), stop + loc.getStart() - 1);
final String expectedBases = fullBases.substring(start, stop);
final Haplotype full = new Haplotype(fullBases.getBytes(), loc);
final Haplotype trimmed = new Haplotype(expectedBases.getBytes(), trimmedLoc);
final int hapStart = 10;
full.setAlignmentStartHapwrtRef(hapStart);
full.setCigar(TextCigarCodec.getSingleton().decode(full.length() + "M"));
// the trimmed haplotype's alignment start shifts right by the trimmed-off prefix
trimmed.setAlignmentStartHapwrtRef(hapStart + start);
trimmed.setCigar(TextCigarCodec.getSingleton().decode(trimmed.length() + "M"));
tests.add(new Object[]{full, trimmedLoc, trimmed});
}
}
// 1M2D2M haplotype: trims whose boundary lands inside the 2 bp deletion must yield null
final Haplotype full = new Haplotype("ACT".getBytes(), new UnvalidatingGenomeLoc("20", 0, 10, 14));
full.setAlignmentStartHapwrtRef(10);
full.setCigar(TextCigarCodec.getSingleton().decode("1M2D2M"));
tests.add(new Object[]{full, new UnvalidatingGenomeLoc("20", 0, 11, 12), null});
tests.add(new Object[]{full, new UnvalidatingGenomeLoc("20", 0, 10, 12), null});
tests.add(new Object[]{full, new UnvalidatingGenomeLoc("20", 0, 11, 13), null});
return tests.toArray(new Object[][]{});
}
/**
 * Trimming a haplotype must reproduce the expected bases, coordinates, cigar,
 * and alignment start; untrimmable cases must return null.
 */
@Test(dataProvider = "TrimmingData")
public void testTrim(final Haplotype full, final GenomeLoc trimTo, final Haplotype expected) {
final Haplotype trimmed = full.trim(trimTo);
if ( expected == null ) {
// impossible trims (boundary inside a deletion) must yield null
Assert.assertNull(trimmed);
return;
}
Assert.assertEquals(trimmed.getBases(), expected.getBases());
Assert.assertEquals(trimmed.getStartPosition(), trimTo.getStart());
Assert.assertEquals(trimmed.getStopPosition(), trimTo.getStop());
Assert.assertEquals(trimmed.getCigar(), expected.getCigar());
Assert.assertEquals(trimmed.getAlignmentStartHapwrtRef(), expected.getAlignmentStartHapwrtRef());
}
/**
 * Trimming to a span that extends beyond the haplotype's own location must
 * throw IllegalArgumentException.
 */
@Test(expectedExceptions = IllegalArgumentException.class)
public void testBadTrimLoc() {
final Haplotype hap = new Haplotype("ACGTAACCGGT".getBytes(), new UnvalidatingGenomeLoc("20", 0, 10, 20));
hap.trim(new UnvalidatingGenomeLoc("20", 0, 1, 20));
}
/**
 * Trimming a haplotype that has no genome location must throw
 * IllegalStateException.
 */
@Test(expectedExceptions = IllegalStateException.class)
public void testBadTrimNoLoc() {
final Haplotype unplaced = new Haplotype("ACGTAACCGGT".getBytes());
unplaced.trim(new UnvalidatingGenomeLoc("20", 0, 1, 20));
}
}

View File

@ -27,7 +27,7 @@ package org.broadinstitute.sting.utils.haplotypeBAMWriter;
import net.sf.samtools.*;
import org.broadinstitute.sting.BaseTest;
import org.broadinstitute.sting.utils.Haplotype;
import org.broadinstitute.sting.utils.haplotype.Haplotype;
import org.broadinstitute.sting.utils.SWPairwiseAlignment;
import org.broadinstitute.sting.utils.Utils;
import org.broadinstitute.sting.utils.sam.AlignmentUtils;

View File

@ -51,7 +51,7 @@ import java.util.*;
* testing of the new (non-legacy) version of LocusIteratorByState
*/
public class LocusIteratorByStateUnitTest extends LocusIteratorByStateBaseTest {
// debug toggle; must stay false so the @Test(enabled = ! DEBUG) tests run
private static final boolean DEBUG = false;
protected LocusIteratorByState li;
@Test(enabled = true)
@ -361,7 +361,7 @@ public class LocusIteratorByStateUnitTest extends LocusIteratorByStateBaseTest {
// comprehensive LIBS/PileupElement tests //
////////////////////////////////////////////
// single provider annotation: two @DataProvider annotations on one method do not compile;
// renamed to "MyLIBSTest" to avoid colliding with the base class's provider name
@DataProvider(name = "MyLIBSTest")
public Object[][] makeLIBSTest() {
final List<Object[]> tests = new LinkedList<Object[]>();
@ -377,7 +377,7 @@ public class LocusIteratorByStateUnitTest extends LocusIteratorByStateBaseTest {
// Arrays.asList(3));
}
// single @Test annotation (duplicates do not compile); "enabled = true && ! DEBUG" simplified to "! DEBUG"
@Test(enabled = ! DEBUG, dataProvider = "MyLIBSTest")
public void testLIBS(LIBSTest params) {
// create the iterator by state with the fake reads and fake records
final GATKSAMRecord read = params.makeRead();

View File

@ -948,4 +948,89 @@ public class AlignmentUtilsUnitTest {
Assert.assertEquals(actualEndPos, pos + elt.getLength());
Assert.assertEquals(AlignmentUtils.consolidateCigar(new Cigar(elts)), expectedCigar);
}
/**
 * Fixtures for AlignmentUtils.getBasesCoveringRefInterval.
 *
 * Each row is { read bases, ref interval start, ref interval end, cigar,
 * expected bases (null when an interval boundary falls inside a deletion) }.
 */
@DataProvider(name = "GetBasesCoveringRefIntervalData")
public Object[][] makeGetBasesCoveringRefIntervalData() {
final List<Object[]> cases = new ArrayList<Object[]>();
// matches
// 0123
// ACGT
cases.add(new Object[]{"ACGT", 0, 3, "4M", "ACGT"});
cases.add(new Object[]{"ACGT", 1, 3, "4M", "CGT"});
cases.add(new Object[]{"ACGT", 1, 2, "4M", "CG"});
cases.add(new Object[]{"ACGT", 1, 1, "4M", "C"});
// deletions
// 012345
// AC--GT
cases.add(new Object[]{"ACGT", 0, 5, "2M2D2M", "ACGT"});
cases.add(new Object[]{"ACGT", 1, 5, "2M2D2M", "CGT"});
cases.add(new Object[]{"ACGT", 2, 5, "2M2D2M", null});
cases.add(new Object[]{"ACGT", 3, 5, "2M2D2M", null});
cases.add(new Object[]{"ACGT", 4, 5, "2M2D2M", "GT"});
cases.add(new Object[]{"ACGT", 5, 5, "2M2D2M", "T"});
cases.add(new Object[]{"ACGT", 0, 4, "2M2D2M", "ACG"});
cases.add(new Object[]{"ACGT", 0, 3, "2M2D2M", null});
cases.add(new Object[]{"ACGT", 0, 2, "2M2D2M", null});
cases.add(new Object[]{"ACGT", 0, 1, "2M2D2M", "AC"});
cases.add(new Object[]{"ACGT", 0, 0, "2M2D2M", "A"});
// insertions
// 01--23
// ACTTGT
cases.add(new Object[]{"ACTTGT", 0, 3, "2M2I2M", "ACTTGT"});
cases.add(new Object[]{"ACTTGT", 1, 3, "2M2I2M", "CTTGT"});
cases.add(new Object[]{"ACTTGT", 2, 3, "2M2I2M", "GT"});
cases.add(new Object[]{"ACTTGT", 3, 3, "2M2I2M", "T"});
cases.add(new Object[]{"ACTTGT", 0, 2, "2M2I2M", "ACTTG"});
cases.add(new Object[]{"ACTTGT", 0, 1, "2M2I2M", "AC"});
cases.add(new Object[]{"ACTTGT", 1, 2, "2M2I2M", "CTTG"});
cases.add(new Object[]{"ACTTGT", 2, 2, "2M2I2M", "G"});
cases.add(new Object[]{"ACTTGT", 1, 1, "2M2I2M", "C"});
cases.add(new Object[]{"ACGT", 0, 1, "2M2I", "AC"});
cases.add(new Object[]{"ACGT", 1, 1, "2M2I", "C"});
cases.add(new Object[]{"ACGT", 0, 0, "2M2I", "A"});
return cases.toArray(new Object[][]{});
}
/**
 * getBasesCoveringRefInterval must return exactly the read bases spanning the
 * reference interval, or null when a boundary falls inside a deletion.
 */
@Test(dataProvider = "GetBasesCoveringRefIntervalData", enabled = true)
public void testGetBasesCoveringRefInterval(final String basesString, final int refStart, final int refEnd, final String cigarString, final String expected) {
final Cigar decodedCigar = TextCigarCodec.getSingleton().decode(cigarString);
final byte[] actualBytes = AlignmentUtils.getBasesCoveringRefInterval(refStart, refEnd, basesString.getBytes(), 0, decodedCigar);
if ( expected == null ) {
Assert.assertNull(actualBytes);
} else {
Assert.assertEquals(new String(actualBytes), expected);
}
}
/**
 * Fixtures for startsOrEndsWithInsertionOrDeletion: a cigar string and
 * whether its first or last element is an insertion or deletion.
 */
@DataProvider(name = "StartsOrEndsWithInsertionOrDeletionData")
public Object[][] makeStartsOrEndsWithInsertionOrDeletionData() {
final List<Object[]> cases = new ArrayList<Object[]>();
cases.add(new Object[]{"2M", false});
cases.add(new Object[]{"1D2M", true});
cases.add(new Object[]{"2M1D", true});
cases.add(new Object[]{"2M1I", true});
cases.add(new Object[]{"1I2M", true});
cases.add(new Object[]{"1M1I2M", false});
cases.add(new Object[]{"1M1D2M", false});
cases.add(new Object[]{"1M1I2M1I", true});
cases.add(new Object[]{"1M1I2M1D", true});
cases.add(new Object[]{"1D1M1I2M", true});
cases.add(new Object[]{"1I1M1I2M", true});
cases.add(new Object[]{"1M1I2M1I1M", false});
cases.add(new Object[]{"1M1I2M1D1M", false});
cases.add(new Object[]{"1M1D2M1D1M", false});
return cases.toArray(new Object[][]{});
}
/**
 * startsOrEndsWithInsertionOrDeletion must match the expectation for each
 * cigar fixture.
 */
@Test(dataProvider = "StartsOrEndsWithInsertionOrDeletionData", enabled = true)
public void testStartsOrEndsWithInsertionOrDeletion(final String cigar, final boolean expected) {
final Cigar decoded = TextCigarCodec.getSingleton().decode(cigar);
Assert.assertEquals(AlignmentUtils.startsOrEndsWithInsertionOrDeletion(decoded), expected);
}
}

View File

@ -134,4 +134,12 @@ public class GATKSAMRecordUnitTest extends BaseTest {
read.setIsStrandless(true);
read.setReadNegativeStrandFlag(true);
}
/**
 * The decoded reduced-read counts must be a fresh array (not the raw tag
 * stored on the read) and must agree element-wise with getReducedCount(i).
 */
@Test
public void testGetReducedCountsIsCorrect() {
final byte[] decoded = reducedRead.getReducedReadCounts();
Assert.assertNotSame(decoded, reducedRead.getAttribute(GATKSAMRecord.REDUCED_READ_CONSENSUS_TAG));
for ( int pos = 0; pos < decoded.length; pos++ )
Assert.assertEquals(decoded[pos], reducedRead.getReducedCount(pos), "Reduced counts vector not equal to getReducedCount(i) at " + pos);
}
}