take FnMut closures by reference
I mistakenly thought these had to be monomorphized. (The FnOnce closures still
do, until rust-lang/rfcs#1909 is implemented.) It turns out this way works
fine. It should result in less compile time and smaller code size, though I
didn't measure this.
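
To illustrate the tradeoff the message describes, here is a minimal sketch. Nothing in it comes from this repository; `for_each_generic`, `for_each_dyn`, and the closures are invented stand-ins. A generic `F: FnMut` parameter is monomorphized, so the compiler emits a separate copy of the function for each distinct closure type, while a `&mut FnMut` trait object is compiled once and reached through dynamic dispatch. (The commit predates Rust 2018's `dyn` keyword, so its signatures write `&mut FnMut(...)` where current Rust would write `&mut dyn FnMut(...)`.)

// Hypothetical stand-ins, for illustration only; none of these names are
// from the repository.

// Generic parameter: monomorphized. The compiler emits one copy of this
// function per distinct closure type it is called with.
fn for_each_generic<F>(items: &[i32], mut f: F)
where F: FnMut(i32) {
    for &item in items {
        f(item);
    }
}

// Trait-object parameter: compiled once; all call sites share the same
// machine code and reach the closure through dynamic dispatch. (Before
// Rust 2018 this was spelled `&mut FnMut(i32)`, as in the diff below.)
fn for_each_dyn(items: &[i32], f: &mut dyn FnMut(i32)) {
    for &item in items {
        f(item);
    }
}

fn main() {
    let items = [1, 2, 3];

    let mut sum = 0;
    for_each_generic(&items, |i| sum += i);

    // Callers must borrow the closure explicitly, which is why each call
    // site in the diff gains a `&mut`.
    let mut product = 1;
    for_each_dyn(&items, &mut |i| product *= i);

    assert_eq!(sum, 6);
    assert_eq!(product, 6);
}

The `FnOnce` case has to stay generic: calling a `FnOnce` consumes the closure, which can't be done through a reference until unsized rvalues (rust-lang/rfcs#1909) land. The diff below shows the mechanical consequence for callers: each call site gains an explicit `&mut` in front of its closure.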
scottlamb committed Feb 23, 2018
1 parent d9841fd commit 843e1b4
Showing 6 changed files with 29 additions and 27 deletions.
42 changes: 22 additions & 20 deletions db/db.rs
@@ -999,9 +999,9 @@ impl LockedDatabase {

/// Lists the specified recordings in ascending order by start time, passing them to a supplied
/// function. Given that the function is called with the database lock held, it should be quick.
-pub fn list_recordings_by_time<F>(&self, stream_id: i32, desired_time: Range<recording::Time>,
-                                  f: F) -> Result<(), Error>
-where F: FnMut(ListRecordingsRow) -> Result<(), Error> {
+pub fn list_recordings_by_time(
+    &self, stream_id: i32, desired_time: Range<recording::Time>,
+    f: &mut FnMut(ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
let mut stmt = self.conn.prepare_cached(&self.list_recordings_by_time_sql)?;
let rows = stmt.query_named(&[
(":stream_id", &stream_id),
@@ -1011,9 +1011,9 @@ impl LockedDatabase {
}

/// Lists the specified recordings in ascending order by id.
-pub fn list_recordings_by_id<F>(&self, stream_id: i32, desired_ids: Range<i32>, f: F)
-    -> Result<(), Error>
-where F: FnMut(ListRecordingsRow) -> Result<(), Error> {
+pub fn list_recordings_by_id(
+    &self, stream_id: i32, desired_ids: Range<i32>,
+    f: &mut FnMut(ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
let mut stmt = self.conn.prepare_cached(LIST_RECORDINGS_BY_ID_SQL)?;
let rows = stmt.query_named(&[
(":start", &CompositeId::new(stream_id, desired_ids.start).0),
@@ -1022,8 +1022,9 @@ impl LockedDatabase {
self.list_recordings_inner(rows, f)
}

-fn list_recordings_inner<F>(&self, mut rows: rusqlite::Rows, mut f: F) -> Result<(), Error>
-where F: FnMut(ListRecordingsRow) -> Result<(), Error> {
+fn list_recordings_inner(
+    &self, mut rows: rusqlite::Rows,
+    f: &mut FnMut(ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
while let Some(row) = rows.next() {
let row = row?;
let id = CompositeId(row.get_checked::<_, i64>(0)?);
@@ -1052,11 +1053,11 @@ impl LockedDatabase {
/// Calls `list_recordings_by_time` and aggregates consecutive recordings.
/// Rows are given to the callback in arbitrary order. Callers which care about ordering
/// should do their own sorting.
-pub fn list_aggregated_recordings<F>(&self, stream_id: i32,
-                                     desired_time: Range<recording::Time>,
-                                     forced_split: recording::Duration,
-                                     mut f: F) -> Result<(), Error>
-where F: FnMut(&ListAggregatedRecordingsRow) -> Result<(), Error> {
+pub fn list_aggregated_recordings(
+    &self, stream_id: i32, desired_time: Range<recording::Time>,
+    forced_split: recording::Duration,
+    f: &mut FnMut(&ListAggregatedRecordingsRow) -> Result<(), Error>)
+    -> Result<(), Error> {
// Iterate, maintaining a map from a recording_id to the aggregated row for the latest
// batch of recordings from the run starting at that id. Runs can be split into multiple
// batches for a few reasons:
@@ -1071,7 +1072,7 @@ impl LockedDatabase {
// their timestamps overlap. Tracking all active runs prevents that interleaving from
// causing problems.)
let mut aggs: BTreeMap<i32, ListAggregatedRecordingsRow> = BTreeMap::new();
-self.list_recordings_by_time(stream_id, desired_time, |row| {
+self.list_recordings_by_time(stream_id, desired_time, &mut |row| {
let recording_id = row.id.recording();
let run_start_id = recording_id - row.run_offset;
let needs_flush = if let Some(a) = aggs.get(&run_start_id) {
@@ -1146,8 +1147,9 @@ impl LockedDatabase {

/// Lists the oldest sample files (to delete to free room).
/// `f` should return true as long as further rows are desired.
-pub(crate) fn list_oldest_sample_files<F>(&self, stream_id: i32, mut f: F) -> Result<(), Error>
-where F: FnMut(ListOldestSampleFilesRow) -> bool {
+pub(crate) fn list_oldest_sample_files(
+    &self, stream_id: i32, f: &mut FnMut(ListOldestSampleFilesRow) -> bool)
+    -> Result<(), Error> {
let s = match self.streams_by_id.get(&stream_id) {
None => bail!("no stream {}", stream_id),
Some(s) => s,
@@ -1805,7 +1807,7 @@ mod tests {
{
let db = db.lock();
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
-db.list_recordings_by_time(stream_id, all_time, |_row| {
+db.list_recordings_by_time(stream_id, all_time, &mut |_row| {
rows += 1;
Ok(())
}).unwrap();
@@ -1830,7 +1832,7 @@ mod tests {
{
let db = db.lock();
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
-db.list_recordings_by_time(stream_id, all_time, |row| {
+db.list_recordings_by_time(stream_id, all_time, &mut |row| {
rows += 1;
recording_id = Some(row.id);
assert_eq!(r.time,
@@ -1845,7 +1847,7 @@ mod tests {
assert_eq!(1, rows);

rows = 0;
-db.lock().list_oldest_sample_files(stream_id, |row| {
+db.lock().list_oldest_sample_files(stream_id, &mut |row| {
rows += 1;
assert_eq!(recording_id, Some(row.id));
assert_eq!(r.time, row.time);
@@ -2052,7 +2054,7 @@ mod tests {
{
let mut db = db.lock();
let mut v = Vec::new();
-db.list_oldest_sample_files(stream_id, |r| { v.push(r); true }).unwrap();
+db.list_oldest_sample_files(stream_id, &mut |r| { v.push(r); true }).unwrap();
assert_eq!(1, v.len());
db.delete_recordings(&mut v);
db.flush("delete test").unwrap();
2 changes: 1 addition & 1 deletion db/dir.rs
@@ -383,7 +383,7 @@ fn get_rows_to_delete(db: &db::LockedDatabase, stream_id: i32,
return Ok(());
}
let mut n = 0;
-db.list_oldest_sample_files(stream_id, |row| {
+db.list_oldest_sample_files(stream_id, &mut |row| {
bytes_to_delete += row.sample_file_bytes as i64;
to_delete.push(row);
n += 1;
2 changes: 1 addition & 1 deletion db/testutil.rs
@@ -149,7 +149,7 @@ impl TestDb {
db.flush("create_recording_from_encoder").unwrap();
let mut row = None;
db.list_recordings_by_id(TEST_STREAM_ID, id.recording() .. id.recording()+1,
-    |r| { row = Some(r); Ok(()) }).unwrap();
+    &mut |r| { row = Some(r); Ok(()) }).unwrap();
row.unwrap()
}
}
4 changes: 2 additions & 2 deletions src/mp4.rs
@@ -1788,7 +1788,7 @@ mod tests {
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
{
let db = tdb.db.lock();
-db.list_recordings_by_time(TEST_STREAM_ID, all_time, |r| {
+db.list_recordings_by_time(TEST_STREAM_ID, all_time, &mut |r| {
let d = r.duration_90k;
assert!(skip_90k + shorten_90k < d);
builder.append(&*db, r, skip_90k .. d - shorten_90k).unwrap();
@@ -2252,7 +2252,7 @@ mod bench {
let segment = {
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
let mut row = None;
-db.list_recordings_by_time(testutil::TEST_STREAM_ID, all_time, |r| {
+db.list_recordings_by_time(testutil::TEST_STREAM_ID, all_time, &mut |r| {
row = Some(r);
Ok(())
}).unwrap();
2 changes: 1 addition & 1 deletion src/streamer.rs
@@ -385,7 +385,7 @@ mod tests {
Frame{start_90k: 90011, duration_90k: 0, is_key: false},
]);
let mut recordings = Vec::new();
-db.list_recordings_by_id(testutil::TEST_STREAM_ID, 1..3, |r| {
+db.list_recordings_by_id(testutil::TEST_STREAM_ID, 1..3, &mut |r| {
recordings.push(r);
Ok(())
}).unwrap();
4 changes: 2 additions & 2 deletions src/web.rs
@@ -257,7 +257,7 @@ impl ServiceInner {
.ok_or_else(|| format_err!("no such camera {}", uuid))?;
let stream_id = camera.streams[type_.index()]
.ok_or_else(|| format_err!("no such stream {}/{}", uuid, type_))?;
-db.list_aggregated_recordings(stream_id, r, split, |row| {
+db.list_aggregated_recordings(stream_id, r, split, &mut |row| {
let end = row.ids.end - 1; // in api, ids are inclusive.
out.recordings.push(json::Recording {
start_id: row.ids.start,
@@ -327,7 +327,7 @@ impl ServiceInner {
let db = self.db.lock();
let mut prev = None;
let mut cur_off = 0;
-db.list_recordings_by_id(stream_id, s.ids.clone(), |r| {
+db.list_recordings_by_id(stream_id, s.ids.clone(), &mut |r| {
let recording_id = r.id.recording();

// Check for missing recordings.
