diff --git a/cooler/_binning.py b/cooler/_binning.py
index 46d8bf45..4ecad5f4 100644
--- a/cooler/_binning.py
+++ b/cooler/_binning.py
@@ -386,9 +386,11 @@ def __init__(self, filepath, chromsizes, bins, map=map, n_chunks=1, is_one_based
         n_lines = f.get_linecount()
         max_chunk = int(100e6)
         n_chunks = n_lines // 2 // max_chunk
+        old_n = self.n_chunks
         self.n_chunks = max(self.n_chunks, n_chunks)
-        logger.info("Pairs file has {} lines. Increasing max-split to {}.".format(
-            n_lines, self.n_chunks))
+        if self.n_chunks > old_n:
+            logger.info("Pairs file has {} lines. Increasing max-split to {}.".format(
+                n_lines, self.n_chunks))
 
         # all requested contigs will be placed in the output matrix
         self.gs = GenomeSegmentation(chromsizes, bins)
diff --git a/cooler/cli/cload.py b/cooler/cli/cload.py
index bc0be33e..d866fccf 100644
--- a/cooler/cli/cload.py
+++ b/cooler/cli/cload.py
@@ -203,7 +203,8 @@ def tabix(bins, pairs_path, cool_path, metadata, assembly, nproc, zero_based, ma
             opts['C2'] = kwargs['chrom2'] - 1
         if 'pos2' in kwargs:
             opts['P2'] = kwargs['pos2'] - 1
-        iterator = TabixAggregator(pairs_path, chromsizes, bins, map=map, zero_based=zero_based, n_chunks=max_split, **opts)
+        iterator = TabixAggregator(pairs_path, chromsizes, bins, map=map,
+            is_one_based=(not zero_based), n_chunks=max_split, **opts)
         create(cool_path, bins, iterator, metadata, assembly)
     finally:
         if nproc > 1:
@@ -257,7 +258,8 @@ def pairix(bins, pairs_path, cool_path, metadata, assembly, nproc, zero_based, m
             map = pool.imap
         else:
             map = six.moves.map
-        iterator = PairixAggregator(pairs_path, chromsizes, bins, map=map, zero_based=zero_based, n_chunks=max_split)
+        iterator = PairixAggregator(pairs_path, chromsizes, bins, map=map,
+            is_one_based=(not zero_based), n_chunks=max_split)
         create(cool_path, bins, iterator, metadata, assembly)
     finally:
         if nproc > 1: