fixing a bug where reads in overlapping interval based locus traversals could get assigned to only one of the two regions
git-svn-id: file:///humgen/gsa-scr1/gsa-engineering/svn_contents/trunk@1407 348d0f76-0448-11de-a6fe-93d51630548a
This commit is contained in:
parent
bb1d31914c
commit
fc1c76f1d2
|
|
@ -40,8 +40,12 @@ public class IntervalShard implements Shard {
|
|||
|
||||
/** a collection of genomic locations to iterate over */
|
||||
private GenomeLoc mSet;
|
||||
private Shard.ShardType mType = Shard.ShardType.LOCUS_INTERVAL;
|
||||
|
||||
IntervalShard(GenomeLoc myLocation) {
|
||||
IntervalShard(GenomeLoc myLocation, Shard.ShardType intervalType) {
|
||||
if (intervalType != Shard.ShardType.LOCUS_INTERVAL && intervalType != Shard.ShardType.READ_INTERVAL)
|
||||
throw new IllegalArgumentException("The specified interval type must be either LOCUS_INTERVAL or READ_INTERVAL");
|
||||
mType = intervalType;
|
||||
mSet = myLocation.clone();
|
||||
}
|
||||
|
||||
|
|
@ -56,6 +60,6 @@ public class IntervalShard implements Shard {
|
|||
* @return READ, indicating the shard type
|
||||
*/
|
||||
public Shard.ShardType getShardType() {
|
||||
return ShardType.INTERVAL;
|
||||
return mType;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ public class IntervalShardStrategy implements ShardStrategy {
|
|||
|
||||
/** their preferred size of the shard, we can modify this based on what we see in the shards */
|
||||
private long size;
|
||||
|
||||
private Shard.ShardType type;
|
||||
/**
|
||||
* change the recommended shard size for the next shard we generate. The code will do its
|
||||
* best to respect this value, but there are no guarantees.
|
||||
|
|
@ -66,10 +66,11 @@ public class IntervalShardStrategy implements ShardStrategy {
|
|||
* @param size
|
||||
* @param locations
|
||||
*/
|
||||
IntervalShardStrategy( long size, GenomeLocSortedSet locations ) {
|
||||
IntervalShardStrategy( long size, GenomeLocSortedSet locations, Shard.ShardType shardType ) {
|
||||
if (locations == null || locations.isEmpty()) {
|
||||
throw new StingException("IntervalShardStrategy: genomic regions list is empty.");
|
||||
}
|
||||
type = shardType;
|
||||
this.regions = locations.clone();
|
||||
this.size = size;
|
||||
}
|
||||
|
|
@ -110,7 +111,7 @@ public class IntervalShardStrategy implements ShardStrategy {
|
|||
GenomeLoc loc = regions.iterator().next();
|
||||
|
||||
regions.removeRegion(loc);
|
||||
return new IntervalShard(loc);
|
||||
return new IntervalShard(loc,type);
|
||||
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -173,11 +173,11 @@ public abstract class LocusShardStrategy implements ShardStrategy {
|
|||
|
||||
if (loc.getStop() - loc.getStart() <= proposedSize) {
|
||||
intervals.removeRegion(loc);
|
||||
return new IntervalShard(loc);
|
||||
return new IntervalShard(loc,Shard.ShardType.LOCUS_INTERVAL);
|
||||
} else {
|
||||
GenomeLoc subLoc = GenomeLocParser.createGenomeLoc(loc.getContigIndex(), loc.getStart(), loc.getStart() + proposedSize - 1);
|
||||
intervals.removeRegion(subLoc);
|
||||
return new IntervalShard(subLoc);
|
||||
return new IntervalShard(subLoc,Shard.ShardType.LOCUS_INTERVAL);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ import java.io.Serializable;
|
|||
*/
|
||||
public interface Shard extends Serializable {
|
||||
enum ShardType {
|
||||
READ, LOCUS, INTERVAL
|
||||
READ, LOCUS, READ_INTERVAL, LOCUS_INTERVAL
|
||||
}
|
||||
|
||||
/** @return the genome location represented by this shard */
|
||||
|
|
|
|||
|
|
@ -106,8 +106,9 @@ public class ShardStrategyFactory {
|
|||
case EXPONENTIAL:
|
||||
return new ExpGrowthLocusShardStrategy(dic, startingSize, lst, limitDataCount);
|
||||
case INTERVAL:
|
||||
return new IntervalShardStrategy(startingSize, lst, Shard.ShardType.LOCUS_INTERVAL);
|
||||
case READS:
|
||||
return new IntervalShardStrategy(startingSize, lst);
|
||||
return new IntervalShardStrategy(startingSize, lst, Shard.ShardType.READ_INTERVAL);
|
||||
default:
|
||||
throw new StingException("Strategy: " + strat + " isn't implemented");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ public class SAMDataSource implements SimpleDataSource {
|
|||
/**
|
||||
* A histogram of exactly what reads were removed from the input stream and why.
|
||||
*/
|
||||
private SAMReadViolationHistogram violations = new SAMReadViolationHistogram();
|
||||
private SAMReadViolationHistogram violations = new SAMReadViolationHistogram();
|
||||
|
||||
// A pool of SAM iterators.
|
||||
private SAMResourcePool resourcePool = null;
|
||||
|
|
@ -120,24 +120,24 @@ public class SAMDataSource implements SimpleDataSource {
|
|||
return resourcePool.getHeader();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns Reads data structure containing information about the reads data sources placed in this pool as well as
|
||||
* information about how they are downsampled, sorted, and filtered
|
||||
* @return
|
||||
*/
|
||||
public Reads getReadsInfo() { return reads; }
|
||||
|
||||
/**
|
||||
|
||||
/**
|
||||
* Returns header merger: a class that keeps the mapping between original read groups and read groups
|
||||
* of the merged stream; merger also provides access to the individual file readers (and hence headers
|
||||
* prior to the merging too) maintained by the system.
|
||||
* prior to the merging too) maintained by the system.
|
||||
* @return
|
||||
*/
|
||||
public SamFileHeaderMerger getHeaderMerger() { return resourcePool.getHeaderMerger(); }
|
||||
|
||||
/**
|
||||
*
|
||||
*
|
||||
* @param shard the shard to get data for
|
||||
*
|
||||
* @return an iterator for that region
|
||||
|
|
@ -162,7 +162,8 @@ public class SAMDataSource implements SimpleDataSource {
|
|||
reads.getDownsamplingFraction(),
|
||||
reads.getSafetyChecking(),
|
||||
reads.getSupplementalFilters());
|
||||
} else if (shard.getShardType() == Shard.ShardType.INTERVAL) {
|
||||
} else if ((shard.getShardType() == Shard.ShardType.LOCUS_INTERVAL) ||
|
||||
(shard.getShardType() == Shard.ShardType.READ_INTERVAL)) {
|
||||
iterator = seekLocus(shard.getGenomeLoc());
|
||||
iterator = applyDecoratingIterators(false,
|
||||
iterator,
|
||||
|
|
@ -170,8 +171,8 @@ public class SAMDataSource implements SimpleDataSource {
|
|||
reads.getSafetyChecking(),
|
||||
reads.getSupplementalFilters());
|
||||
|
||||
// add the new overlapping detection iterator, if we have a last interval
|
||||
if (mLastInterval != null && queryOverlapping) iterator = new IntervalOverlapIterator(iterator,mLastInterval,false);
|
||||
// add the new overlapping detection iterator, if we have a last interval and we're a read based shard
|
||||
if (mLastInterval != null && shard.getShardType() == Shard.ShardType.READ_INTERVAL ) iterator = new IntervalOverlapIterator(iterator,mLastInterval,false);
|
||||
mLastInterval = shard.getGenomeLoc();
|
||||
} else {
|
||||
|
||||
|
|
@ -258,7 +259,7 @@ public class SAMDataSource implements SimpleDataSource {
|
|||
*/
|
||||
void setResourcePool( SAMResourcePool resourcePool ) {
|
||||
this.resourcePool = resourcePool;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve unmapped reads.
|
||||
|
|
|
|||
Loading…
Reference in New Issue