/* minimap2 map.c — read-to-reference mapping: minimizer collection, seed
 * hit gathering, chaining, alignment and the multi-threaded mapping
 * pipeline (read -> map -> output), plus split-index merging. */
#include <stdlib.h>
2017-04-26 19:36:46 +08:00
#include <string.h>
2017-06-30 07:35:38 +08:00
#include <assert.h>
#include <errno.h>
2017-04-26 19:36:46 +08:00
#include "kthread.h"
2017-04-10 02:59:39 +08:00
#include "kvec.h"
#include "kalloc.h"
#include "sdust.h"
2017-06-06 22:16:33 +08:00
#include "mmpriv.h"
2017-04-26 19:36:46 +08:00
#include "bseq.h"
2017-10-05 01:05:18 +08:00
#include "khash.h"
2017-04-08 03:56:10 +08:00
#ifdef SHOW_PERF
extern int64_t get_mseconds();
extern int64_t time_mm_map_file_frag,
time_map_work_for_block_1,
time_map_work_for_block_2,
time_map_work_for_block_3,
time_mm_map_frag_b1,
time_mm_map_frag_b2,
time_mm_map_frag_b3,
time_mm_map_frag_b4,
time_mm_map_frag_b5,
time_mm_map_frag_b6,
time_mg_lchain_dp,
time_collect_seed_hits_heap,
time_collect_seed_hits,
time_mg_chain_backtrack;
#endif
2017-04-10 02:59:39 +08:00
// Allocate a thread-local buffer for mapping. Unless kalloc is disabled via
// (mm_dbg_flag & 1), a per-thread memory pool is created as well.
// Returns NULL if the allocation fails (callers must check).
mm_tbuf_t *mm_tbuf_init(void)
{
	mm_tbuf_t *b;
	b = (mm_tbuf_t *)calloc(1, sizeof(mm_tbuf_t));
	if (b == 0)
		return 0; // fix: the original dereferenced b without checking calloc's result
	if (!(mm_dbg_flag & 1))
		b->km = km_init();
	return b;
}
// Free a thread-local buffer and its per-thread memory pool; NULL is a no-op.
void mm_tbuf_destroy(mm_tbuf_t *b)
{
	if (b != 0)
	{
		km_destroy(b->km);
		free(b);
	}
}
2018-07-25 11:29:55 +08:00
// Expose the per-thread kalloc memory pool held by a thread-local buffer.
void *mm_tbuf_get_km(mm_tbuf_t *b)
{
	void *km = b->km;
	return km;
}
// Drop minimizers that fall mostly inside low-complexity regions (LCRs)
// found by SDUST. 'a' holds n minimizers (position/strand packed in .y,
// span in the low 8 bits of .x); the kept minimizers are compacted to the
// front of 'a' and the new count is returned. If sdust_thres <= 0 the
// filter is disabled and n is returned unchanged.
static int mm_dust_minier(void *km, int n, mm128_t *a, int l_seq, const char *seq, int sdust_thres)
{
	int n_dreg, j, k, u = 0;
	const uint64_t *dreg; // LCR intervals: start in the high 32 bits, end in the low 32 bits
	sdust_buf_t *sdb;
	if (sdust_thres <= 0)
		return n;
	sdb = sdust_buf_init(km);
	dreg = sdust_core((const uint8_t *)seq, l_seq, sdust_thres, 64, &n_dreg, sdb);
	for (j = k = 0; j < n; ++j)
	{ // squeeze out minimizers that significantly overlap with LCRs
		int32_t qpos = (uint32_t)a[j].y >> 1, span = a[j].x & 0xff;
		int32_t s = qpos - (span - 1), e = s + span; // [s,e) covered by this minimizer
		// advance past LCRs that end at or before the minimizer start;
		// valid because both lists are sorted by position
		while (u < n_dreg && (int32_t)dreg[u] <= s)
			++u;
		if (u < n_dreg && (int32_t)(dreg[u] >> 32) < e)
		{
			int v, l = 0; // l accumulates total masked length within [s,e)
			for (v = u; v < n_dreg && (int32_t)(dreg[v] >> 32) < e; ++v)
			{ // iterate over LCRs overlapping this minimizer
				int ss = s > (int32_t)(dreg[v] >> 32) ? s : dreg[v] >> 32;
				int ee = e < (int32_t)dreg[v] ? e : (uint32_t)dreg[v];
				l += ee - ss;
			}
			if (l <= span >> 1)
				a[k++] = a[j]; // keep the minimizer if less than half of it falls in masked region
		}
		else
			a[k++] = a[j];
	}
	sdust_buf_destroy(sdb);
	return k; // the new size
}
2017-04-28 18:24:45 +08:00
// Sketch minimizers for all segments of one read fragment into 'mv'.
// Query positions of later segments are offset by the summed length of the
// preceding segments ('sum << 1' because the strand bit occupies bit 0 of
// .y), so positions are unique across the concatenated fragment.
static void collect_minimizers(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int n_segs, const int *qlens, const char **seqs, mm128_v *mv)
{
	int i, n, sum = 0; // n: index of the first minimizer of the current segment
	mv->n = 0;
	for (i = n = 0; i < n_segs; ++i)
	{
		size_t j;
		mm_sketch(km, seqs[i], qlens[i], mi->w, mi->k, i, mi->flag & MM_I_HPC, mv);
		for (j = n; j < mv->n; ++j)
			mv->a[j].y += sum << 1; // shift positions by the preceding segments' length
		if (opt->sdust_thres > 0) // mask low-complexity minimizers
			mv->n = n + mm_dust_minier(km, mv->n - n, mv->a + n, qlens[i], seqs[i], opt->sdust_thres);
		sum += qlens[i], n = mv->n;
	}
}
2018-01-26 10:52:49 +08:00
#include "ksort.h"
#define heap_lt(a, b) ((a).x > (b).x)
KSORT_INIT(heap, mm128_t, heap_lt)
2021-04-30 23:24:53 +08:00
// Decide whether one seed hit (reference record r for query seed q) should
// be skipped under the current mapping-mode flags. Returns 1 to skip the
// hit, 0 to keep it. *is_self is set to 1 when the hit maps a query onto
// itself on the same strand; downstream code uses it to suppress spurious
// self-extension.
static inline int skip_seed(int flag, uint64_t r, const mm_seed_t *q, const char *qname, int qlen, const mm_idx_t *mi, int *is_self)
{
	*is_self = 0;
	if (qname && (flag & (MM_F_NO_DIAG | MM_F_NO_DUAL)))
	{
		const mm_idx_seq_t *s = &mi->seq[r >> 32]; // target sequence of this hit
		int cmp;
		cmp = strcmp(qname, s->name);
		if ((flag & MM_F_NO_DIAG) && cmp == 0 && (int)s->len == qlen)
		{
			if ((uint32_t)r >> 1 == (q->q_pos >> 1))
				return 1; // avoid the diagonal anchors
			if ((r & 1) == (q->q_pos & 1))
				*is_self = 1; // this flag is used to avoid spurious extension on self chain
		}
		if ((flag & MM_F_NO_DUAL) && cmp > 0) // all-vs-all mode: map each pair once
			return 1;
	}
	if (flag & (MM_F_FOR_ONLY | MM_F_REV_ONLY))
	{
		if ((r & 1) == (q->q_pos & 1))
		{ // forward strand
			if (flag & MM_F_REV_ONLY)
				return 1;
		}
		else
		{
			if (flag & MM_F_FOR_ONLY)
				return 1;
		}
	}
	return 0;
}
2018-01-26 10:52:49 +08:00
2018-01-26 22:21:45 +08:00
// Collect seed hits (anchors) via a k-way heap merge so the output comes out
// sorted without a radix sort: forward-strand anchors are written from the
// front of 'a' and reverse-strand anchors from the back, then the reverse
// block is flipped and compacted. Returns the anchor array (*n_a entries);
// also reports repeat length and minimizer positions via the out-params.
static mm128_t *collect_seed_hits_heap(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len,
									   int *n_mini_pos, uint64_t **mini_pos)
{
	int i, n_m, heap_size = 0;
	int64_t j, n_for = 0, n_rev = 0;
	mm_seed_t *m;
	mm128_t *a, *heap;
#ifdef SHOW_PERF
	int64_t tmp_cur_time = get_mseconds(), tmp_diff = 0;
#endif
	m = mm_collect_matches(km, &n_m, qlen, max_occ, opt->max_max_occ, opt->occ_dist, mi, mv, n_a, rep_len, n_mini_pos, mini_pos);
	heap = (mm128_t *)kmalloc(km, n_m * sizeof(mm128_t));
	a = (mm128_t *)kmalloc(km, *n_a * sizeof(mm128_t));
	// seed the heap with the first occurrence of each matched minimizer;
	// heap entry: .x = reference record, .y = seed index<<32 | occurrence index
	for (i = 0, heap_size = 0; i < n_m; ++i)
	{
		if (m[i].n > 0)
		{
			heap[heap_size].x = m[i].cr[0];
			heap[heap_size].y = (uint64_t)i << 32;
			++heap_size;
		}
	}
	ks_heapmake_heap(heap_size, heap);
	while (heap_size > 0)
	{
		mm_seed_t *q = &m[heap->y >> 32]; // seed owning the smallest pending hit
		mm128_t *p;
		uint64_t r = heap->x;
		int32_t is_self, rpos = (uint32_t)r >> 1;
		if (!skip_seed(opt->flag, r, q, qname, qlen, mi, &is_self))
		{
			if ((r & 1) == (q->q_pos & 1))
			{ // forward strand: append from the front of 'a'
				p = &a[n_for++];
				p->x = (r & 0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1;
			}
			else
			{ // reverse strand: append from the back of 'a'
				p = &a[(*n_a) - (++n_rev)];
				p->x = 1ULL << 63 | (r & 0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos >> 1) + 1 - q->q_span) - 1);
			}
			p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT;
			if (q->is_tandem)
				p->y |= MM_SEED_TANDEM;
			if (is_self)
				p->y |= MM_SEED_SELF;
		}
		// update the heap: advance to this seed's next occurrence, or pop
		if ((uint32_t)heap->y < q->n - 1)
		{
			++heap[0].y;
			heap[0].x = m[heap[0].y >> 32].cr[(uint32_t)heap[0].y];
		}
		else
		{
			heap[0] = heap[heap_size - 1];
			--heap_size;
		}
		ks_heapdown_heap(0, heap_size, heap);
	}
	kfree(km, m);
	kfree(km, heap);
	// reverse anchors on the reverse strand, as they are in the descending order
	for (j = 0; j < n_rev >> 1; ++j)
	{
		mm128_t t = a[(*n_a) - 1 - j];
		a[(*n_a) - 1 - j] = a[(*n_a) - (n_rev - j)];
		a[(*n_a) - (n_rev - j)] = t;
	}
	// close the gap between the forward block and the (flipped) reverse block
	if (*n_a > n_for + n_rev)
	{
		memmove(a + n_for, a + (*n_a) - n_rev, n_rev * sizeof(mm128_t));
		*n_a = n_for + n_rev;
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_collect_seed_hits_heap, tmp_diff);
#endif
	return a;
}
// Collect seed hits (anchors) by flattening all occurrences of every matched
// minimizer, then radix-sorting the result. This is the default (non-heap)
// path. Returns the anchor array (*n_a entries); also reports repeat length
// and minimizer positions via the out-params.
static mm128_t *collect_seed_hits(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len,
								  int *n_mini_pos, uint64_t **mini_pos)
{
	int i, n_m;
	mm_seed_t *m;
	mm128_t *a;
#ifdef SHOW_PERF
	int64_t tmp_cur_time = get_mseconds(), tmp_diff = 0;
#endif
	m = mm_collect_matches(km, &n_m, qlen, max_occ, opt->max_max_occ, opt->occ_dist, mi, mv, n_a, rep_len, n_mini_pos, mini_pos);
	a = (mm128_t *)kmalloc(km, *n_a * sizeof(mm128_t)); // upper bound; *n_a is recounted below as hits are kept
	for (i = 0, *n_a = 0; i < n_m; ++i)
	{
		mm_seed_t *q = &m[i];
		const uint64_t *r = q->cr; // all reference occurrences of this minimizer
		uint32_t k;
		for (k = 0; k < q->n; ++k)
		{
			int32_t is_self, rpos = (uint32_t)r[k] >> 1;
			mm128_t *p;
			if (skip_seed(opt->flag, r[k], q, qname, qlen, mi, &is_self))
				continue;
			p = &a[(*n_a)++];
			if ((r[k] & 1) == (q->q_pos & 1))
			{ // forward strand
				p->x = (r[k] & 0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1;
			}
			else if (!(opt->flag & MM_F_QSTRAND))
			{ // reverse strand and not in the query-strand mode
				p->x = 1ULL << 63 | (r[k] & 0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos >> 1) + 1 - q->q_span) - 1);
			}
			else
			{ // reverse strand; query-strand
				int32_t len = mi->seq[r[k] >> 32].len;
				p->x = 1ULL << 63 | (r[k] & 0xffffffff00000000ULL) | (len - (rpos + 1 - q->q_span) - 1); // coordinate only accurate for non-HPC seeds
				p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1;
			}
			p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT;
			if (q->is_tandem)
				p->y |= MM_SEED_TANDEM;
			if (is_self)
				p->y |= MM_SEED_SELF;
		}
	}
	kfree(km, m);
	radix_sort_128x(a, a + (*n_a)); // chaining requires anchors sorted by .x
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_collect_seed_hits, tmp_diff);
#endif
	return a;
}
// Post-process chains: pick primary mapping(s) and prune weak secondary
// chains. Skipped entirely when MM_F_ALL_CHAINS asks for every chain.
static void chain_post(const mm_mapopt_t *opt, int max_chain_gap_ref, const mm_idx_t *mi, void *km, int qlen, int n_segs, const int *qlens, int *n_regs, mm_reg1_t *regs, mm128_t *a)
{
	if (!(opt->flag & MM_F_ALL_CHAINS))
	{ // choose primary mapping(s); with MM_F_ALL_CHAINS all chains are kept as-is
		mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag & MM_F_HARD_MLEVEL, opt->alt_drop);
		if (n_segs <= 1)
			mm_select_sub(km, opt->pri_ratio, mi->k * 2, opt->best_n, 1, opt->max_gap * 0.8, n_regs, regs);
		else
			mm_select_sub_multi(km, opt->pri_ratio, 0.2f, 0.7f, max_chain_gap_ref, mi->k * 2, opt->best_n, n_segs, qlens, n_regs, regs);
	}
}
// Perform base-level alignment of the chained regions when CIGAR output is
// requested, then re-select primary mappings (alignment can change scores
// and thus the primary/secondary relationship). Returns the (possibly
// reallocated) region array.
static mm_reg1_t *align_regs(const mm_mapopt_t *opt, const mm_idx_t *mi, void *km, int qlen, const char *seq, int *n_regs, mm_reg1_t *regs, mm128_t *a)
{
	if (!(opt->flag & MM_F_CIGAR))
		return regs; // chaining-only mode: nothing to do
	regs = mm_align_skeleton(km, opt, mi, qlen, seq, n_regs, regs, a); // this calls mm_filter_regs()
	if (!(opt->flag & MM_F_ALL_CHAINS))
	{ // choose primary mapping(s) again after alignment
		mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag & MM_F_HARD_MLEVEL, opt->alt_drop);
		mm_select_sub(km, opt->pri_ratio, mi->k * 2, opt->best_n, 0, opt->max_gap * 0.8, n_regs, regs);
		mm_set_sam_pri(*n_regs, regs);
	}
	return regs;
}
2018-07-15 11:43:34 +08:00
// Map one read fragment (1..MM_MAX_SEG segments, e.g. a read pair) against
// the index. Pipeline: collect minimizers -> collect seed hits -> chain
// (DP or RMQ) -> optional re-chain/long-join -> generate regions ->
// optional base-level alignment. Results go to n_regs[i]/regs[i] per
// segment; b carries thread-local scratch memory and receives rep_len and
// frag_gap as side outputs.
void mm_map_frag(const mm_idx_t *mi, int n_segs, const int *qlens, const char **seqs, int *n_regs, mm_reg1_t **regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname)
{
	int i, j, rep_len, qlen_sum, n_regs0, n_mini_pos;
	int max_chain_gap_qry, max_chain_gap_ref, is_splice = !!(opt->flag & MM_F_SPLICE), is_sr = !!(opt->flag & MM_F_SR);
	uint32_t hash;
	int64_t n_a;
	uint64_t *u, *mini_pos;
	mm128_t *a;
	mm128_v mv = {0, 0, 0};
	mm_reg1_t *regs0;
	km_stat_t kmst;
	float chn_pen_gap, chn_pen_skip;
#ifdef SHOW_PERF
	int64_t tmp_cur_time = get_mseconds(), tmp_diff = 0;
#endif
	for (i = 0, qlen_sum = 0; i < n_segs; ++i)
		qlen_sum += qlens[i], n_regs[i] = 0, regs[i] = 0;
	if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG)
		return; // nothing to map
	if (opt->max_qlen > 0 && qlen_sum > opt->max_qlen)
		return;
	// per-read hash used to break ties reproducibly downstream
	hash = qname && !(opt->flag & MM_F_NO_HASH_NAME) ? __ac_X31_hash_string(qname) : 0;
	hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt->seed);
	hash = __ac_Wang_hash(hash);
	collect_minimizers(b->km, opt, mi, n_segs, qlens, seqs, &mv);
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b1, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	if (opt->q_occ_frac > 0.0f)
		mm_seed_mz_flt(b->km, &mv, opt->mid_occ, opt->q_occ_frac); // filter out minimizers that occur too frequently
	if (opt->flag & MM_F_HEAP_SORT)
		a = collect_seed_hits_heap(b->km, opt, opt->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos);
	else
		a = collect_seed_hits(b->km, opt, opt->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos);
	if (mm_dbg_flag & MM_DBG_PRINT_SEED)
	{
		fprintf(stderr, "RS\t%d\n", rep_len);
		for (i = 0; i < n_a; ++i)
			fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x << 1 >> 33].name, (int32_t)a[i].x, "+-"[a[i].x >> 63], (int32_t)a[i].y, (int32_t)(a[i].y >> 32 & 0xff),
					i == 0 ? 0 : ((int32_t)a[i].y - (int32_t)a[i - 1].y) - ((int32_t)a[i].x - (int32_t)a[i - 1].x));
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b2, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	// set max chaining gap on the query and the reference sequence
	if (is_sr)
		max_chain_gap_qry = qlen_sum > opt->max_gap ? qlen_sum : opt->max_gap;
	else
		max_chain_gap_qry = opt->max_gap;
	if (opt->max_gap_ref > 0)
	{
		max_chain_gap_ref = opt->max_gap_ref; // always honor mm_mapopt_t::max_gap_ref if set
	}
	else if (opt->max_frag_len > 0)
	{
		max_chain_gap_ref = opt->max_frag_len - qlen_sum;
		if (max_chain_gap_ref < opt->max_gap)
			max_chain_gap_ref = opt->max_gap;
	}
	else
		max_chain_gap_ref = opt->max_gap;
	// chaining penalties are scaled by the k-mer length
	chn_pen_gap = opt->chain_gap_scale * 0.01 * mi->k;
	chn_pen_skip = opt->chain_skip_scale * 0.01 * mi->k;
	if (opt->flag & MM_F_RMQ)
	{
		a = mg_lchain_rmq(opt->max_gap, opt->rmq_inner_dist, opt->bw, opt->max_chain_skip, opt->rmq_size_cap, opt->min_cnt, opt->min_chain_score,
						  chn_pen_gap, chn_pen_skip, n_a, a, &n_regs0, &u, b->km);
	}
	else
	{
		a = mg_lchain_dp(max_chain_gap_ref, max_chain_gap_qry, opt->bw, opt->max_chain_skip, opt->max_chain_iter, opt->min_cnt, opt->min_chain_score,
						 chn_pen_gap, chn_pen_skip, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km);
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b3, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	if (opt->bw_long > opt->bw && (opt->flag & (MM_F_SPLICE | MM_F_SR | MM_F_NO_LJOIN)) == 0 && n_segs == 1 && n_regs0 > 1)
	{ // re-chain/long-join for long sequences
		int32_t st = (int32_t)a[0].y, en = (int32_t)a[(int32_t)u[0] - 1].y; // query span of the top chain
		if (qlen_sum - (en - st) > opt->rmq_rescue_size || en - st > qlen_sum * opt->rmq_rescue_ratio)
		{ // the best chain covers too little of the read: redo with RMQ chaining and a wider band
			int32_t i;
			for (i = 0, n_a = 0; i < n_regs0; ++i)
				n_a += (int32_t)u[i];
			kfree(b->km, u);
			radix_sort_128x(a, a + n_a); // restore the sorted anchor order expected by the chainer
			a = mg_lchain_rmq(opt->max_gap, opt->rmq_inner_dist, opt->bw_long, opt->max_chain_skip, opt->rmq_size_cap, opt->min_cnt, opt->min_chain_score,
							  chn_pen_gap, chn_pen_skip, n_a, a, &n_regs0, &u, b->km);
		}
	}
	else if (opt->max_occ > opt->mid_occ && rep_len > 0 && !(opt->flag & MM_F_RMQ))
	{ // re-chain, mostly for short reads
		int rechain = 0;
		if (n_regs0 > 0)
		{ // test if the best chain has all the segments
			int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0;
			for (i = 0; i < n_regs0; ++i)
			{ // find the best chain
				if (max < (int)(u[i] >> 32))
					max = u[i] >> 32, max_i = i, max_off = off;
				off += (uint32_t)u[i];
			}
			for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain
				if ((a[max_off + i].y & MM_SEED_SEG_MASK) != (a[max_off + i - 1].y & MM_SEED_SEG_MASK))
					++n_chained_segs;
			if (n_chained_segs < n_segs)
				rechain = 1;
		}
		else
			rechain = 1;
		if (rechain)
		{ // redo chaining with a higher max_occ threshold
			kfree(b->km, a);
			kfree(b->km, u);
			kfree(b->km, mini_pos);
			if (opt->flag & MM_F_HEAP_SORT)
				a = collect_seed_hits_heap(b->km, opt, opt->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos);
			else
				a = collect_seed_hits(b->km, opt, opt->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos);
			a = mg_lchain_dp(max_chain_gap_ref, max_chain_gap_qry, opt->bw, opt->max_chain_skip, opt->max_chain_iter, opt->min_cnt, opt->min_chain_score,
							 chn_pen_gap, chn_pen_skip, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km);
		}
	}
	// side outputs consumed by the caller (worker_for)
	b->frag_gap = max_chain_gap_ref;
	b->rep_len = rep_len;
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b4, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a, !!(opt->flag & MM_F_QSTRAND));
	if (mi->n_alt)
	{
		mm_mark_alt(mi, n_regs0, regs0);
		mm_hit_sort(b->km, &n_regs0, regs0, opt->alt_drop); // this step can be merged into mm_gen_regs(); will do if this shows up in profile
	}
	if (mm_dbg_flag & (MM_DBG_PRINT_SEED | MM_DBG_PRINT_CHAIN))
		for (j = 0; j < n_regs0; ++j)
			for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i)
				fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x << 1 >> 33].name, (int32_t)a[i].x, "+-"[a[i].x >> 63], (int32_t)a[i].y, (int32_t)(a[i].y >> 32 & 0xff),
						i == regs0[j].as ? 0 : ((int32_t)a[i].y - (int32_t)a[i - 1].y) - ((int32_t)a[i].x - (int32_t)a[i - 1].x));
	chain_post(opt, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a);
	if (!is_sr && !(opt->flag & MM_F_QSTRAND))
	{
		mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos);
		n_regs0 = mm_filter_strand_retained(n_regs0, regs0);
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b5, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	if (n_segs == 1)
	{ // uni-segment
		regs0 = align_regs(opt, mi, b->km, qlens[0], seqs[0], &n_regs0, regs0, a);
		// NOTE(review): realloc result is assigned back unchecked; on failure the
		// old block would leak and regs0 become NULL — confirm intended policy
		regs0 = (mm_reg1_t *)realloc(regs0, sizeof(*regs0) * n_regs0);
		mm_set_mapq(b->km, n_regs0, regs0, opt->min_chain_score, opt->a, rep_len, is_sr);
		n_regs[0] = n_regs0, regs[0] = regs0;
	}
	else
	{ // multi-segment
		mm_seg_t *seg;
		seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains
		free(regs0);
		for (i = 0; i < n_segs; ++i)
		{
			mm_set_parent(b->km, opt->mask_level, opt->mask_len, n_regs[i], regs[i], opt->a * 2 + opt->b, opt->flag & MM_F_HARD_MLEVEL, opt->alt_drop); // update mm_reg1_t::parent
			regs[i] = align_regs(opt, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a);
			mm_set_mapq(b->km, n_regs[i], regs[i], opt->min_chain_score, opt->a, rep_len, is_sr);
		}
		mm_seg_free(b->km, n_segs, seg);
		if (n_segs == 2 && opt->pe_ori >= 0 && (opt->flag & MM_F_CIGAR))
			mm_pair(b->km, max_chain_gap_ref, opt->pe_bonus, opt->a * 2 + opt->b, opt->a, qlens, n_regs, regs); // pairing
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_mm_map_frag_b6, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	kfree(b->km, mv.a);
	kfree(b->km, a);
	kfree(b->km, u);
	kfree(b->km, mini_pos);
	if (b->km)
	{ // periodically reset the per-thread allocator to bound memory usage
		km_stat(b->km, &kmst);
		if (mm_dbg_flag & MM_DBG_PRINT_QNAME)
			fprintf(stderr, "QM\t%s\t%d\tcap=%ld,nCore=%ld,largest=%ld\n", qname, qlen_sum, kmst.capacity, kmst.n_cores, kmst.largest);
		assert(kmst.n_blocks == kmst.n_cores); // otherwise, there is a memory leak
		if (kmst.largest > 1U << 28 || (opt->cap_kalloc > 0 && kmst.capacity > opt->cap_kalloc))
		{
			if (mm_dbg_flag & MM_DBG_PRINT_QNAME)
				fprintf(stderr, "[W::%s] reset thread-local memory after read %s\n", __func__, qname);
			km_destroy(b->km);
			b->km = km_init();
		}
	}
}
2017-09-20 04:18:28 +08:00
// Single-sequence convenience wrapper around mm_map_frag(). Returns a
// malloc'ed array of *n_regs mappings (caller frees).
mm_reg1_t *mm_map(const mm_idx_t *mi, int qlen, const char *seq, int *n_regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname)
{
	mm_reg1_t *regs = 0;
	const char *seqs[1];
	int qlens[1];
	seqs[0] = seq, qlens[0] = qlen;
	mm_map_frag(mi, 1, qlens, seqs, n_regs, &regs, b, opt, qname);
	return regs;
}
2017-04-26 19:36:46 +08:00
/**************************
* Multi-threaded mapping *
**************************/
// Shared state of the three-step mapping pipeline (read -> map -> output).
typedef struct
{
	int n_processed, n_threads, n_fp; // reads seen so far; worker threads; input files
	int64_t mini_batch_size;          // bases per mini-batch read in step 0
	const mm_mapopt_t *opt;
	mm_bseq_file_t **fp;              // n_fp open sequence files
	const mm_idx_t *mi;               // the (possibly merged) index
	kstring_t str;                    // reusable output line buffer
	int n_parts;                      // >0 when merging hits from a split index
	uint32_t *rid_shift;              // per-part reference-id offsets for split merging
	FILE *fp_split, **fp_parts;       // temp file being written; per-part temp files being read
} pipeline_t;
// Per-batch state passed between pipeline steps.
typedef struct
{
	const pipeline_t *p;
	int n_seq, n_frag;  // sequences in this batch; fragments (read groups) they form
	mm_bseq1_t *seq;
	// per-sequence hit count; per-fragment segment offset/count; per-sequence
	// repeat length and fragment gap (all carved from one allocation)
	int *n_reg, *seg_off, *n_seg, *rep_len, *frag_gap;
	mm_reg1_t **reg;    // per-sequence mapping results
	mm_tbuf_t **buf;    // per-thread scratch buffers
} step_t;
// kt_for() callback: map fragment i of the batch on thread tid. Handles
// paired-end orientation by reverse-complementing the configured mate(s)
// before mapping and flipping the coordinates back afterwards.
static void worker_for(void *_data, long i, int tid) // kt_for() callback
{
	step_t *s = (step_t *)_data;
	int qlens[MM_MAX_SEG], j, off = s->seg_off[i], pe_ori = s->p->opt->pe_ori;
	const char *qseqs[MM_MAX_SEG];
	double t = 0.0;
	mm_tbuf_t *b = s->buf[tid];
	assert(s->n_seg[i] <= MM_MAX_SEG);
	if (mm_dbg_flag & MM_DBG_PRINT_QNAME)
	{
		fprintf(stderr, "QR\t%s\t%d\t%d\n", s->seq[off].name, tid, s->seq[off].l_seq);
		t = realtime();
	}
#ifdef SHOW_PERF
	int64_t tmp_cur_time = get_mseconds(), tmp_diff = 0;
#endif
	for (j = 0; j < s->n_seg[i]; ++j)
	{
		// pe_ori bit 1 flips the first mate, bit 0 flips the second
		if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori >> 1 & 1)) || (j == 1 && (pe_ori & 1))))
			mm_revcomp_bseq(&s->seq[off + j]);
		qlens[j] = s->seq[off + j].l_seq;
		qseqs[j] = s->seq[off + j].seq;
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_map_work_for_block_1, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	if (s->p->opt->flag & MM_F_INDEPEND_SEG)
	{ // map each segment on its own
		for (j = 0; j < s->n_seg[i]; ++j)
		{
			mm_map_frag(s->p->mi, 1, &qlens[j], &qseqs[j], &s->n_reg[off + j], &s->reg[off + j], b, s->p->opt, s->seq[off + j].name);
			s->rep_len[off + j] = b->rep_len;
			s->frag_gap[off + j] = b->frag_gap;
		}
	}
	else
	{ // map all segments of the fragment jointly
		mm_map_frag(s->p->mi, s->n_seg[i], qlens, qseqs, &s->n_reg[off], &s->reg[off], b, s->p->opt, s->seq[off].name);
		for (j = 0; j < s->n_seg[i]; ++j)
		{
			s->rep_len[off + j] = b->rep_len;
			s->frag_gap[off + j] = b->frag_gap;
		}
	}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_map_work_for_block_2, tmp_diff);
	tmp_cur_time = get_mseconds();
#endif
	for (j = 0; j < s->n_seg[i]; ++j) // flip the query strand and coordinate to the original read strand
		if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori >> 1 & 1)) || (j == 1 && (pe_ori & 1))))
		{
			int k, t;
			mm_revcomp_bseq(&s->seq[off + j]); // restore the original sequence
			for (k = 0; k < s->n_reg[off + j]; ++k)
			{
				mm_reg1_t *r = &s->reg[off + j][k];
				t = r->qs;
				r->qs = qlens[j] - r->qe;
				r->qe = qlens[j] - t;
				r->rev = !r->rev;
			}
		}
#ifdef SHOW_PERF
	tmp_diff = get_mseconds() - tmp_cur_time;
	__sync_fetch_and_add(&time_map_work_for_block_3, tmp_diff);
#endif
	if (mm_dbg_flag & MM_DBG_PRINT_QNAME)
		fprintf(stderr, "QT\t%s\t%d\t%.6f\n", s->seq[off].name, tid, realtime() - t);
}
2018-07-15 10:52:36 +08:00
// Split-index merge: for each sequence in the batch, read the hits written
// by every index part from the per-part temp files, shift reference ids,
// concatenate, then redo primary selection, mapping quality and (for pairs)
// pairing on the merged set.
static void merge_hits(step_t *s)
{
	int f, i, k0, k, max_seg = 0, *n_reg_part, *rep_len_part, *frag_gap_part, *qlens;
	void *km;
	FILE **fp = s->p->fp_parts;
	const mm_mapopt_t *opt = s->p->opt;
	km = km_init();
	for (f = 0; f < s->n_frag; ++f)
		max_seg = max_seg > s->n_seg[f] ? max_seg : s->n_seg[f];
	// one allocation carved into four arrays
	qlens = CALLOC(int, max_seg + s->p->n_parts * 3);
	n_reg_part = qlens + max_seg;
	rep_len_part = n_reg_part + s->p->n_parts;
	frag_gap_part = rep_len_part + s->p->n_parts;
	for (f = 0, k = k0 = 0; f < s->n_frag; ++f)
	{
		k0 = k; // index of the fragment's first segment, for pairing below
		for (i = 0; i < s->n_seg[f]; ++i, ++k)
		{
			int j, l, t, rep_len = 0;
			qlens[i] = s->seq[k].l_seq;
			// first pass: per-part counts and the max repeat length
			for (j = 0, s->n_reg[k] = 0; j < s->p->n_parts; ++j)
			{
				mm_err_fread(&n_reg_part[j], sizeof(int), 1, fp[j]);
				mm_err_fread(&rep_len_part[j], sizeof(int), 1, fp[j]);
				mm_err_fread(&frag_gap_part[j], sizeof(int), 1, fp[j]);
				s->n_reg[k] += n_reg_part[j];
				if (rep_len < rep_len_part[j])
					rep_len = rep_len_part[j];
			}
			s->reg[k] = CALLOC(mm_reg1_t, s->n_reg[k]);
			// second pass: read the regions themselves
			for (j = 0, l = 0; j < s->p->n_parts; ++j)
			{
				for (t = 0; t < n_reg_part[j]; ++t, ++l)
				{
					mm_reg1_t *r = &s->reg[k][l];
					uint32_t capacity;
					mm_err_fread(r, sizeof(mm_reg1_t), 1, fp[j]);
					r->rid += s->p->rid_shift[j]; // remap to the merged index's reference ids
					if (opt->flag & MM_F_CIGAR)
					{
						mm_err_fread(&capacity, 4, 1, fp[j]);
						// NOTE(review): calloc result used without a NULL check — confirm OOM policy
						r->p = (mm_extra_t *)calloc(capacity, 4);
						r->p->capacity = capacity;
						mm_err_fread(r->p, r->p->capacity, 4, fp[j]);
					}
				}
			}
			if (!(opt->flag & MM_F_SR) && s->seq[k].l_seq >= opt->rank_min_len)
				mm_update_dp_max(s->seq[k].l_seq, s->n_reg[k], s->reg[k], opt->rank_frac, opt->a, opt->b);
			for (j = 0; j < s->n_reg[k]; ++j)
			{
				mm_reg1_t *r = &s->reg[k][j];
				if (r->p)
					r->p->dp_max2 = 0; // reset ->dp_max2 as mm_set_parent() doesn't clear it; necessary with mm_update_dp_max()
				r->subsc = 0; // this may not be necessary
				r->n_sub = 0; // n_sub will be an underestimate as we don't see all the chains now, but it can't be accurate anyway
			}
			// redo selection on the merged hit set
			mm_hit_sort(km, &s->n_reg[k], s->reg[k], opt->alt_drop);
			mm_set_parent(km, opt->mask_level, opt->mask_len, s->n_reg[k], s->reg[k], opt->a * 2 + opt->b, opt->flag & MM_F_HARD_MLEVEL, opt->alt_drop);
			if (!(opt->flag & MM_F_ALL_CHAINS))
			{
				mm_select_sub(km, opt->pri_ratio, s->p->mi->k * 2, opt->best_n, 0, opt->max_gap * 0.8, &s->n_reg[k], s->reg[k]);
				mm_set_sam_pri(s->n_reg[k], s->reg[k]);
			}
			mm_set_mapq(km, s->n_reg[k], s->reg[k], opt->min_chain_score, opt->a, rep_len, !!(opt->flag & MM_F_SR));
		}
		if (s->n_seg[f] == 2 && opt->pe_ori >= 0 && (opt->flag & MM_F_CIGAR))
			mm_pair(km, frag_gap_part[0], opt->pe_bonus, opt->a * 2 + opt->b, opt->a, qlens, &s->n_reg[k0], &s->reg[k0]);
	}
	free(qlens);
	km_destroy(km);
}
2017-04-26 19:36:46 +08:00
// kt_pipeline() callback implementing the three pipeline steps:
//   step 0 — read a mini-batch of sequences and group them into fragments;
//   step 1 — map the batch (or merge split-index hits when n_parts > 0);
//   step 2 — write SAM/PAF (or split temp files) and free the batch.
static void *worker_pipeline(void *shared, int step, void *in)
{
	int i, j, k;
	pipeline_t *p = (pipeline_t *)shared;
	if (step == 0)
	{ // step 0: read sequences
		int with_qual = (!!(p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_NO_QUAL));
		int with_comment = !!(p->opt->flag & MM_F_COPY_COMMENT);
		int frag_mode = (p->n_fp > 1 || !!(p->opt->flag & MM_F_FRAG_MODE));
		step_t *s;
		s = (step_t *)calloc(1, sizeof(step_t));
		if (p->n_fp > 1)
			s->seq = mm_bseq_read_frag2(p->n_fp, p->fp, p->mini_batch_size, with_qual, with_comment, &s->n_seq);
		else
			s->seq = mm_bseq_read3(p->fp[0], p->mini_batch_size, with_qual, with_comment, frag_mode, &s->n_seq);
		if (s->seq)
		{
			s->p = p;
			for (i = 0; i < s->n_seq; ++i)
				s->seq[i].rid = p->n_processed++; // assign globally increasing read ids
			s->buf = (mm_tbuf_t **)calloc(p->n_threads, sizeof(mm_tbuf_t *));
			for (i = 0; i < p->n_threads; ++i)
				s->buf[i] = mm_tbuf_init();
			s->n_reg = (int *)calloc(5 * s->n_seq, sizeof(int));
			s->seg_off = s->n_reg + s->n_seq; // seg_off, n_seg, rep_len and frag_gap are allocated together with n_reg
			s->n_seg = s->seg_off + s->n_seq;
			s->rep_len = s->n_seg + s->n_seq;
			s->frag_gap = s->rep_len + s->n_seq;
			s->reg = (mm_reg1_t **)calloc(s->n_seq, sizeof(mm_reg1_t *));
			// group consecutive same-name reads into one fragment (frag_mode)
			for (i = 1, j = 0; i <= s->n_seq; ++i)
				if (i == s->n_seq || !frag_mode || !mm_qname_same(s->seq[i - 1].name, s->seq[i].name))
				{
					s->n_seg[s->n_frag] = i - j;
					s->seg_off[s->n_frag++] = j;
					j = i;
				}
			return s;
		}
		else
			free(s); // end of input: returning NULL stops the pipeline
	}
	else if (step == 1)
	{ // step 1: map
		if (p->n_parts > 0)
			merge_hits((step_t *)in);
		else
			kt_for(p->n_threads, worker_for, in, ((step_t *)in)->n_frag);
		return in;
	}
	else if (step == 2)
	{ // step 2: output
		void *km = 0;
		step_t *s = (step_t *)in;
		const mm_idx_t *mi = p->mi;
		for (i = 0; i < p->n_threads; ++i)
			mm_tbuf_destroy(s->buf[i]);
		free(s->buf);
		if ((p->opt->flag & MM_F_OUT_CS) && !(mm_dbg_flag & MM_DBG_NO_KALLOC))
			km = km_init();
		for (k = 0; k < s->n_frag; ++k)
		{
			int seg_st = s->seg_off[k], seg_en = s->seg_off[k] + s->n_seg[k];
			for (i = seg_st; i < seg_en; ++i)
			{
				mm_bseq1_t *t = &s->seq[i];
				if (p->opt->split_prefix && p->n_parts == 0)
				{ // then write to temporary files
					mm_err_fwrite(&s->n_reg[i], sizeof(int), 1, p->fp_split);
					mm_err_fwrite(&s->rep_len[i], sizeof(int), 1, p->fp_split);
					mm_err_fwrite(&s->frag_gap[i], sizeof(int), 1, p->fp_split);
					for (j = 0; j < s->n_reg[i]; ++j)
					{
						mm_reg1_t *r = &s->reg[i][j];
						mm_err_fwrite(r, sizeof(mm_reg1_t), 1, p->fp_split);
						if (p->opt->flag & MM_F_CIGAR)
						{
							mm_err_fwrite(&r->p->capacity, 4, 1, p->fp_split);
							mm_err_fwrite(r->p, r->p->capacity, 4, p->fp_split);
						}
					}
				}
				else if (s->n_reg[i] > 0)
				{ // the query has at least one hit
					for (j = 0; j < s->n_reg[i]; ++j)
					{
						mm_reg1_t *r = &s->reg[i][j];
						assert(!r->sam_pri || r->id == r->parent);
						if ((p->opt->flag & MM_F_NO_PRINT_2ND) && r->id != r->parent)
							continue; // skip secondary hits on request
						if (p->opt->flag & MM_F_OUT_SAM)
							mm_write_sam3(&p->str, mi, t, i - seg_st, j, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t *const *)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]);
						else
							mm_write_paf3(&p->str, mi, t, r, km, p->opt->flag, s->rep_len[i]);
						mm_err_puts(p->str.s);
					}
				}
				else if ((p->opt->flag & MM_F_PAF_NO_HIT) || ((p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_SAM_HIT_ONLY)))
				{ // output an empty hit, if requested
					if (p->opt->flag & MM_F_OUT_SAM)
						mm_write_sam3(&p->str, mi, t, i - seg_st, -1, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t *const *)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]);
					else
						mm_write_paf3(&p->str, mi, t, 0, 0, p->opt->flag, s->rep_len[i]);
					mm_err_puts(p->str.s);
				}
			}
			// free this fragment's results and input sequences
			for (i = seg_st; i < seg_en; ++i)
			{
				for (j = 0; j < s->n_reg[i]; ++j)
					free(s->reg[i][j].p);
				free(s->reg[i]);
				free(s->seq[i].seq);
				free(s->seq[i].name);
				if (s->seq[i].qual)
					free(s->seq[i].qual);
				if (s->seq[i].comment)
					free(s->seq[i].comment);
			}
		}
		free(s->reg);
		free(s->n_reg);
		free(s->seq); // seg_off, n_seg, rep_len and frag_gap were allocated with reg; no memory leak here
		km_destroy(km);
		if (mm_verbose >= 3)
			fprintf(stderr, "[M::%s::%.3f*%.2f] mapped %d sequences\n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0), s->n_seq);
		free(s);
	}
	return 0;
}
2018-07-15 10:52:36 +08:00
static mm_bseq_file_t **open_bseqs(int n, const char **fn)
2017-04-26 19:36:46 +08:00
{
2018-07-15 10:52:36 +08:00
mm_bseq_file_t **fp;
int i, j;
fp = (mm_bseq_file_t **)calloc(n, sizeof(mm_bseq_file_t *));
for (i = 0; i < n; ++i)
{
if ((fp[i] = mm_bseq_open(fn[i])) == 0)
{
if (mm_verbose >= 1)
fprintf(stderr, "ERROR: failed to open file '%s': %s\n", fn[i], strerror(errno));
for (j = 0; j < i; ++j)
2018-07-15 10:52:36 +08:00
mm_bseq_close(fp[j]);
free(fp);
return 0;
}
}
2018-07-15 10:52:36 +08:00
return fp;
}
// Map n_segs input files (treated as segments of the same fragments, e.g.
// paired-end mates) against 'idx' with the threaded pipeline. Returns 0 on
// success, -1 on bad arguments or if any input file cannot be opened.
int mm_map_file_frag(const mm_idx_t *idx, int n_segs, const char **fn, const mm_mapopt_t *opt, int n_threads)
{
#ifdef SHOW_PERF
	int64_t tmp_cur_time = get_mseconds();
#endif
	int i, pl_threads;
	pipeline_t pl;
	if (n_segs < 1)
		return -1;
	memset(&pl, 0, sizeof(pipeline_t));
	pl.n_fp = n_segs;
	pl.fp = open_bseqs(pl.n_fp, fn);
	if (pl.fp == 0)
		return -1;
	pl.opt = opt, pl.mi = idx;
	pl.n_threads = n_threads > 1 ? n_threads : 1;
	pl.mini_batch_size = opt->mini_batch_size;
	if (opt->split_prefix)
		pl.fp_split = mm_split_init(opt->split_prefix, idx);
	// pipeline threads: 1 in single-thread mode; otherwise 2, or 3 when a
	// dedicated second I/O thread is requested
	pl_threads = n_threads == 1 ? 1 : (opt->flag & MM_F_2_IO_THREADS) ? 3
																	  : 2;
	kt_pipeline(pl_threads, worker_pipeline, &pl, 3);
	free(pl.str.s);
	if (pl.fp_split)
		fclose(pl.fp_split);
	for (i = 0; i < pl.n_fp; ++i)
		mm_bseq_close(pl.fp[i]);
	free(pl.fp);
#ifdef SHOW_PERF
	time_mm_map_file_frag += get_mseconds() - tmp_cur_time;
#endif
	return 0;
}
// Single-file convenience wrapper around mm_map_file_frag().
int mm_map_file(const mm_idx_t *idx, const char *fn, const mm_mapopt_t *opt, int n_threads)
{
	const char *files[1];
	files[0] = fn;
	return mm_map_file_frag(idx, 1, files, opt, n_threads);
}
2018-07-15 10:52:36 +08:00
// Merge the per-part temporary hit files produced by a split-index run
// (n_split_idx parts) into final SAM/PAF output, then remove the temp
// files. Returns 0 on success, -1 on bad arguments or setup failure.
int mm_split_merge(int n_segs, const char **fn, const mm_mapopt_t *opt, int n_split_idx)
{
	int i;
	pipeline_t pl;
	mm_idx_t *mi;
	if (n_segs < 1 || n_split_idx < 1)
		return -1;
	memset(&pl, 0, sizeof(pipeline_t));
	pl.n_fp = n_segs;
	pl.fp = open_bseqs(pl.n_fp, fn);
	if (pl.fp == 0)
		return -1;
	pl.opt = opt;
	pl.mini_batch_size = opt->mini_batch_size;
	pl.n_parts = n_split_idx;
	pl.fp_parts = CALLOC(FILE *, pl.n_parts);
	pl.rid_shift = CALLOC(uint32_t, pl.n_parts);
	pl.mi = mi = mm_split_merge_prep(opt->split_prefix, n_split_idx, pl.fp_parts, pl.rid_shift);
	if (pl.mi == 0)
	{
		free(pl.fp_parts);
		free(pl.rid_shift);
		return -1;
	}
	// turn per-part sequence counts into cumulative reference-id offsets:
	// shift right by one, then prefix-sum
	for (i = n_split_idx - 1; i > 0; --i)
		pl.rid_shift[i] = pl.rid_shift[i - 1];
	for (pl.rid_shift[0] = 0, i = 1; i < n_split_idx; ++i)
		pl.rid_shift[i] += pl.rid_shift[i - 1];
	if (opt->flag & MM_F_OUT_SAM)
		for (i = 0; i < (int32_t)pl.mi->n_seq; ++i)
			printf("@SQ\tSN:%s\tLN:%d\n", pl.mi->seq[i].name, pl.mi->seq[i].len);
	kt_pipeline(2, worker_pipeline, &pl, 3); // step 1 runs merge_hits() since n_parts > 0
	free(pl.str.s);
	mm_idx_destroy(mi);
	free(pl.rid_shift);
	for (i = 0; i < n_split_idx; ++i)
		fclose(pl.fp_parts[i]);
	free(pl.fp_parts);
	for (i = 0; i < pl.n_fp; ++i)
		mm_bseq_close(pl.fp[i]);
	free(pl.fp);
	mm_split_rm_tmp(opt->split_prefix, n_split_idx);
	return 0;
}