fetch_new_messages(): fetch messages sequentially (#3688)

If we fetch messages out of order, then, for example, reactions don't work: if we process a reaction before the corresponding message has been processed, the reaction is thrown away.
This commit is contained in:
iequidoo
2022-11-15 15:12:33 -03:00
committed by iequidoo
parent da6c68629d
commit 25be8ccd05
2 changed files with 35 additions and 40 deletions

View File

@@ -8,6 +8,7 @@
### Fixes ### Fixes
- fix detection of "All mail", "Trash", "Junk" etc folders. #3760 - fix detection of "All mail", "Trash", "Junk" etc folders. #3760
- fetch messages sequentially to fix reactions on partially downloaded messages #3688
## 1.101.0 ## 1.101.0

View File

@@ -780,8 +780,7 @@ impl Imap {
let show_emails = ShowEmails::from_i32(context.get_config_int(Config::ShowEmails).await?) let show_emails = ShowEmails::from_i32(context.get_config_int(Config::ShowEmails).await?)
.unwrap_or_default(); .unwrap_or_default();
let download_limit = context.download_limit().await?; let download_limit = context.download_limit().await?;
let mut uids_fetch_fully = Vec::with_capacity(msgs.len()); let mut uids_fetch = Vec::<(_, bool /* partially? */)>::with_capacity(msgs.len() + 1);
let mut uids_fetch_partially = Vec::with_capacity(msgs.len());
let mut uid_message_ids = BTreeMap::new(); let mut uid_message_ids = BTreeMap::new();
let mut largest_uid_skipped = None; let mut largest_uid_skipped = None;
@@ -840,14 +839,11 @@ impl Imap {
.await? .await?
{ {
match download_limit { match download_limit {
Some(download_limit) => { Some(download_limit) => uids_fetch.push((
if fetch_response.size.unwrap_or_default() > download_limit { uid,
uids_fetch_partially.push(uid); fetch_response.size.unwrap_or_default() > download_limit,
} else { )),
uids_fetch_fully.push(uid) None => uids_fetch.push((uid, false)),
}
}
None => uids_fetch_fully.push(uid),
} }
uid_message_ids.insert(uid, message_id); uid_message_ids.insert(uid, message_id);
} else { } else {
@@ -855,33 +851,37 @@ impl Imap {
} }
} }
if !uids_fetch_fully.is_empty() || !uids_fetch_partially.is_empty() { if !uids_fetch.is_empty() {
self.connectivity.set_working(context).await; self.connectivity.set_working(context).await;
} }
// Actually download messages. // Actually download messages.
let (largest_uid_fully_fetched, mut received_msgs) = self let mut largest_uid_fetched: u32 = 0;
.fetch_many_msgs( let mut received_msgs = Vec::with_capacity(uids_fetch.len());
context, let mut uids_fetch_in_batch = Vec::with_capacity(max(uids_fetch.len(), 1));
folder, let mut fetch_partially = false;
uids_fetch_fully, uids_fetch.push((0, !uids_fetch.last().unwrap_or(&(0, false)).1));
&uid_message_ids, for (uid, fp) in uids_fetch {
false, if fp != fetch_partially {
fetch_existing_msgs, let (largest_uid_fetched_in_batch, received_msgs_in_batch) = self
) .fetch_many_msgs(
.await?; context,
folder,
let (largest_uid_partially_fetched, received_msgs_2) = self uids_fetch_in_batch.split_off(0),
.fetch_many_msgs( &uid_message_ids,
context, fetch_partially,
folder, fetch_existing_msgs,
uids_fetch_partially, )
&uid_message_ids, .await?;
true, received_msgs.extend(received_msgs_in_batch);
fetch_existing_msgs, largest_uid_fetched = max(
) largest_uid_fetched,
.await?; largest_uid_fetched_in_batch.unwrap_or(0),
received_msgs.extend(received_msgs_2); );
fetch_partially = fp;
}
uids_fetch_in_batch.push(uid);
}
// determine which uid_next to use to update to // determine which uid_next to use to update to
// receive_imf() returns an `Err` value only on recoverable errors, otherwise it just logs an error. // receive_imf() returns an `Err` value only on recoverable errors, otherwise it just logs an error.
@@ -889,13 +889,7 @@ impl Imap {
// So: Update the uid_next to the largest uid that did NOT recoverably fail. Not perfect because if there was // So: Update the uid_next to the largest uid that did NOT recoverably fail. Not perfect because if there was
// another message afterwards that succeeded, we will not retry. The upside is that we will not retry an infinite amount of times. // another message afterwards that succeeded, we will not retry. The upside is that we will not retry an infinite amount of times.
let largest_uid_without_errors = max( let largest_uid_without_errors = max(largest_uid_fetched, largest_uid_skipped.unwrap_or(0));
max(
largest_uid_fully_fetched.unwrap_or(0),
largest_uid_partially_fetched.unwrap_or(0),
),
largest_uid_skipped.unwrap_or(0),
);
let new_uid_next = largest_uid_without_errors + 1; let new_uid_next = largest_uid_without_errors + 1;
if new_uid_next > old_uid_next { if new_uid_next > old_uid_next {