Compare commits

...

14 Commits

Author SHA1 Message Date
holger krekel
6e16ea5f8b intermediate commit 2019-08-23 13:30:09 +02:00
holger krekel
2fd4b09cbc rustify several things around the precheck function 2019-08-22 21:49:47 +02:00
holger krekel
157d68246f Merge branch 'master' into cleanup/jobsend 2019-08-21 09:34:46 +02:00
holger krekel
6b044dd85a make python tests run the mvbox thread 2019-08-21 07:39:41 +02:00
holger krekel
9227c4b55c - simplify inbox-connection-guards and returns
- make dc_msg_exists safe
2019-08-20 11:15:28 +02:00
holger krekel
287ccb15ba address @dignifiedquire Default suggestion 2019-08-20 10:40:27 +02:00
holger krekel
f47e637519 cargo fmt 2019-08-20 09:31:05 +02:00
holger krekel
9633f251e0 deindent/cleanup set_mdnsent and delete_msg return code handling 2019-08-20 09:26:57 +02:00
holger krekel
e319f51342 Merge branch 'master' into cleanup/jobsend 2019-08-20 09:08:21 +02:00
holger krekel
55b62b0b7d cargo fmt 2019-08-20 09:03:36 +02:00
holger krekel
37d20bfb5c select_folder -> bool 2019-08-20 08:59:33 +02:00
holger krekel
6441ceeedc streamline imap mv() wrt to return codes, follow C more closely and add comments about missing dest_uid setting from imap 2019-08-20 08:36:27 +02:00
holger krekel
7641bb4d9a implement ImapResult enum like it was in C, and simplify logic a bit (with @r10s on the side) 2019-08-19 23:21:54 +02:00
holger krekel
67594b52f9 rustify job send 2019-08-19 22:13:49 +02:00
10 changed files with 607 additions and 627 deletions

View File

@@ -377,12 +377,15 @@ class IOThreads:
def is_started(self):
return len(self._name2thread) > 0
def start(self, imap=True, smtp=True):
def start(self, imap=True, smtp=True, mvbox=True):
assert not self.is_started()
if imap:
self._start_one_thread("imap", self.imap_thread_run)
self._start_one_thread("inbox", self.inbox_thread_run)
if smtp:
self._start_one_thread("smtp", self.smtp_thread_run)
if mvbox:
self._start_one_thread("mvbox", self.mvbox_thread_run)
def _start_one_thread(self, name, func):
self._name2thread[name] = t = threading.Thread(target=func, name=name)
@@ -397,13 +400,20 @@ class IOThreads:
for name, thread in self._name2thread.items():
thread.join()
def imap_thread_run(self):
self._log_event("py-bindings-info", 0, "IMAP THREAD START")
def inbox_thread_run(self):
self._log_event("py-bindings-info", 0, "INBOX IMAP THREAD START")
while not self._thread_quitflag:
lib.dc_perform_imap_jobs(self._dc_context)
lib.dc_perform_imap_fetch(self._dc_context)
lib.dc_perform_imap_idle(self._dc_context)
self._log_event("py-bindings-info", 0, "IMAP THREAD FINISHED")
self._log_event("py-bindings-info", 0, "INBOX IMAP THREAD FINISHED")
def mvbox_thread_run(self):
self._log_event("py-bindings-info", 0, "MVBOX IMAP THREAD START")
while not self._thread_quitflag:
lib.dc_perform_mvbox_fetch(self._dc_context)
lib.dc_perform_mvbox_idle(self._dc_context)
self._log_event("py-bindings-info", 0, "MVBOX IMAP THREAD FINISHED")
def smtp_thread_run(self):
self._log_event("py-bindings-info", 0, "SMTP THREAD START")

View File

@@ -1031,7 +1031,7 @@ pub fn get_chat_msgs(context: &Context, chat_id: u32, flags: u32, marker1before:
let curr_local_timestamp = ts + cnv_to_local;
let curr_day = (curr_local_timestamp / 86400) as libc::c_int;
if curr_day != last_day {
ret.push(DC_MSG_ID_LAST_SPECIAL as u32);
ret.push(DC_MSG_ID_LAST_SPECIAL);
last_day = curr_day;
}
}

View File

@@ -108,7 +108,7 @@ impl Default for Chattype {
pub const DC_MSG_ID_MARKER1: usize = 1;
const DC_MSG_ID_DAYMARKER: usize = 9;
pub const DC_MSG_ID_LAST_SPECIAL: usize = 9;
pub const DC_MSG_ID_LAST_SPECIAL: u32 = 9;
/// approx. max. length returned by dc_msg_get_text()
const DC_MAX_GET_TEXT_LEN: usize = 30000;
@@ -122,6 +122,15 @@ pub const DC_CONTACT_ID_LAST_SPECIAL: usize = 9;
pub const DC_CREATE_MVBOX: usize = 1;
#[repr(i32)]
#[derive(Debug, Display, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive, ToSql, FromSql)]
pub enum Delay {
Default = 0,
AtOnce = -1,
Standard = 3,
IncreationPoll = 2,
}
// Flags for configuring IMAP and SMTP servers.
// These flags are optional
// and may be set together with the username, password etc.

View File

@@ -193,43 +193,41 @@ unsafe fn cb_precheck_imf(
server_folder: &str,
server_uid: uint32_t,
) -> libc::c_int {
let mut rfc724_mid_exists: libc::c_int = 0i32;
let msg_id: uint32_t;
let mut old_server_folder: *mut libc::c_char = ptr::null_mut();
let mut old_server_uid: uint32_t = 0i32 as uint32_t;
let mut mark_seen: libc::c_int = 0i32;
msg_id = dc_rfc724_mid_exists(
let rfc724_mid = as_str(rfc724_mid)
.trim_start_matches("<")
.trim_end_matches(">");
let mut mark_seen = false;
info!(
context,
0,
"cb_precheck_imf rfc724_mid={:?}, server_folder={}, server_uid={}",
rfc724_mid,
&mut old_server_folder,
&mut old_server_uid,
server_folder,
server_uid
);
if msg_id != 0i32 as libc::c_uint {
rfc724_mid_exists = 1i32;
if *old_server_folder.offset(0isize) as libc::c_int == 0i32
&& old_server_uid == 0i32 as libc::c_uint
{
info!(
context,
0,
"[move] detected bbc-self {}",
as_str(rfc724_mid),
);
mark_seen = 1i32
} else if as_str(old_server_folder) != server_folder {
info!(
context,
0,
"[move] detected moved message {}",
as_str(rfc724_mid),
);
if let Some(res) = dc_rfc724_mid_exists_with_msg_state(context, rfc724_mid) {
let (msg_id, _, old_server_folder, old_server_uid) = res;
info!(context, 0, "precheck msg_id={}", msg_id);
if msg_id != 0 {
if old_server_folder.is_empty() {
info!(context, 0, "[move] detected bbc-self {}", rfc724_mid,);
mark_seen = true;
} else if old_server_folder != server_folder {
info!(context, 0, "[move] detected moved message {}", rfc724_mid,);
}
dc_update_msg_move_state(context, rfc724_mid, MoveState::Stay);
}
if as_str(old_server_folder) != server_folder || old_server_uid != server_uid {
if old_server_folder != server_folder || old_server_uid != server_uid {
info!(
context,
0, "precheck updated server_uid for msg_id={}", msg_id
);
dc_update_server_uid(context, rfc724_mid, server_folder, server_uid);
job_retry_msg_actions_at_once(context, msg_id);
}
dc_do_heuristics_moves(context, server_folder, msg_id);
if 0 != mark_seen {
if mark_seen {
job_add(
context,
Action::MarkseenMsgOnImap,
@@ -238,9 +236,9 @@ unsafe fn cb_precheck_imf(
0,
);
}
return msg_id as i32;
}
free(old_server_folder as *mut libc::c_void);
rfc724_mid_exists
0
}
fn cb_set_config(context: &Context, key: &str, value: Option<&str>) {

View File

@@ -1,5 +1,6 @@
use crate::constants::*;
use crate::context::*;
use crate::dc_tools::as_str;
use crate::job::*;
use crate::message::*;
use crate::param::Params;
@@ -26,7 +27,7 @@ pub unsafe fn dc_do_heuristics_moves(context: &Context, folder: &str, msg_id: u3
}
if dc_is_mvbox(context, folder) {
dc_update_msg_move_state(context, msg.rfc724_mid, MoveState::Stay);
dc_update_msg_move_state(context, as_str(msg.rfc724_mid), MoveState::Stay);
}
// 1 = dc message, 2 = reply to dc message
@@ -38,7 +39,7 @@ pub unsafe fn dc_do_heuristics_moves(context: &Context, folder: &str, msg_id: u3
Params::new(),
0,
);
dc_update_msg_move_state(context, msg.rfc724_mid, MoveState::Moving);
dc_update_msg_move_state(context, as_str(msg.rfc724_mid), MoveState::Moving);
}
}
}

View File

@@ -343,23 +343,7 @@ unsafe fn add_parts(
// check, if the mail is already in our database - if so, just update the folder/uid
// (if the mail was moved around) and finish. (we may get a mail twice eg. if it is
// moved between folders. make sure, this check is done eg. before securejoin-processing) */
let mut old_server_folder = std::ptr::null_mut();
let mut old_server_uid = 0;
if 0 != dc_rfc724_mid_exists(
context,
rfc724_mid,
&mut old_server_folder,
&mut old_server_uid,
) {
if as_str(old_server_folder) != server_folder.as_ref() || old_server_uid != server_uid {
dc_update_server_uid(context, rfc724_mid, server_folder.as_ref(), server_uid);
}
free(old_server_folder.cast());
cleanup(mime_in_reply_to, mime_references, txt_raw);
bail!("Message already in DB");
}
// XXX call precheck?
// 1 or 0 for yes/no
msgrmsg = mime_parser.is_send_by_messenger;

View File

@@ -12,14 +12,19 @@ use crate::dc_loginparam::*;
use crate::dc_tools::CStringExt;
use crate::oauth2::dc_get_oauth2_access_token;
use crate::types::*;
use deltachat_derive::*;
const DC_IMAP_SEEN: usize = 0x0001;
const DC_REGENERATE: usize = 0x01;
const DC_SUCCESS: usize = 3;
const DC_ALREADY_DONE: usize = 2;
const DC_RETRY_LATER: usize = 1;
const DC_FAILED: usize = 0;
#[repr(usize)]
#[derive(Debug, Display, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive, ToSql, FromSql)]
pub enum ImapResult {
Success = 3,
AlreadyDone = 2,
RetryLater = 1,
Failed = 0,
}
const PREFETCH_FLAGS: &str = "(UID ENVELOPE)";
const BODY_FLAGS: &str = "(FLAGS BODY.PEEK[])";
@@ -632,26 +637,30 @@ impl Imap {
}
}
fn select_folder<S: AsRef<str>>(&self, context: &Context, folder: Option<S>) -> usize {
fn select_folder<S: AsRef<str>>(&self, context: &Context, folder: Option<S>) -> bool {
info!(context, 0, "select_folder0");
if self.session.lock().unwrap().is_none() {
// we are in termination, nothing useful to be done anymore
let mut cfg = self.config.write().unwrap();
cfg.selected_folder = None;
cfg.selected_folder_needs_expunge = false;
return 0;
return false;
}
info!(context, 0, "select_folder1");
// if there is a new folder and the new folder is equal to the selected one, there's nothing to do.
// if there is _no_ new folder, we continue as we might want to expunge below.
if let Some(ref folder) = folder {
if let Some(ref selected_folder) = self.config.read().unwrap().selected_folder {
if folder.as_ref() == selected_folder {
return 1;
return true;
}
}
}
info!(context, 0, "select_folder2");
// deselect existing folder, if needed (it's also done implicitly by SELECT, however, without EXPUNGE then)
if self.config.read().unwrap().selected_folder_needs_expunge {
info!(context, 0, "select_folder3");
if let Some(ref folder) = self.config.read().unwrap().selected_folder {
info!(context, 0, "Expunge messages in \"{}\".", folder);
@@ -665,22 +674,26 @@ impl Imap {
}
}
} else {
return 0;
unreachable!();
}
self.config.write().unwrap().selected_folder_needs_expunge = true;
self.config.write().unwrap().selected_folder_needs_expunge = false;
}
}
info!(context, 0, "select_folder4");
// select new folder
if let Some(ref folder) = folder {
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
info!(context, 0, "select_folder5");
match session.select(folder) {
Ok(mailbox) => {
info!(context, 0, "select_folder6");
let mut config = self.config.write().unwrap();
config.selected_folder = Some(folder.as_ref().to_string());
config.selected_mailbox = Some(mailbox);
}
Err(err) => {
info!(context, 0, "select_folder7");
info!(
context,
0,
@@ -691,15 +704,14 @@ impl Imap {
self.config.write().unwrap().selected_folder = None;
self.should_reconnect.store(true, Ordering::Relaxed);
return 0;
return false;
}
}
} else {
return 0;
unreachable!();
}
}
1
true
}
fn get_config_last_seen_uid<S: AsRef<str>>(&self, context: &Context, folder: S) -> (u32, u32) {
@@ -728,7 +740,7 @@ impl Imap {
return 0;
}
if self.select_folder(context, Some(&folder)) == 0 {
if !self.select_folder(context, Some(&folder)) {
info!(
context,
0,
@@ -1006,7 +1018,7 @@ impl Imap {
self.setup_handle_if_needed(context);
let watch_folder = self.config.read().unwrap().watch_folder.clone();
if self.select_folder(context, watch_folder.as_ref()) == 0 {
if !self.select_folder(context, watch_folder.as_ref()) {
warn!(context, 0, "IMAP-IDLE not setup.",);
return self.fake_idle(context);
@@ -1034,14 +1046,17 @@ impl Imap {
// if needed, the ui can call dc_imap_interrupt_idle() to trigger a reconnect.
idle.set_keepalive(Duration::from_secs(23 * 60));
let res = idle.wait_keepalive();
eprintln!("idle wait_keepalive returned");
// Ignoring the error, as this happens when we try sending after the drop
let _send_res = sender.send(res);
eprintln!("idle sending result");
// Trigger condvar
let mut watch = lock.lock().unwrap();
*watch = true;
cvar.notify_one();
eprintln!("idle spawn thread ending");
}
});
receiver
@@ -1164,12 +1179,9 @@ impl Imap {
uid: u32,
dest_folder: S2,
dest_uid: &mut u32,
) -> usize {
let mut res = DC_RETRY_LATER;
let set = format!("{}", uid);
) -> ImapResult {
if uid == 0 {
res = DC_FAILED;
return ImapResult::Failed;
} else if folder.as_ref() == dest_folder.as_ref() {
info!(
context,
@@ -1179,101 +1191,85 @@ impl Imap {
uid,
dest_folder.as_ref()
);
return ImapResult::AlreadyDone;
}
info!(
context,
0,
"Moving message {}/{} to {}...",
folder.as_ref(),
uid,
dest_folder.as_ref()
);
res = DC_ALREADY_DONE;
} else {
info!(
if !self.select_folder(context, Some(folder.as_ref())) {
warn!(
context,
0,
"Moving message {}/{} to {}...",
folder.as_ref(),
uid,
dest_folder.as_ref()
"Cannot select folder {} for moving message.",
folder.as_ref()
);
if self.select_folder(context, Some(folder.as_ref())) == 0 {
warn!(
context,
0,
"Cannot select folder {} for moving message.",
folder.as_ref()
);
return if !self.should_reconnect() {
ImapResult::Failed
} else {
let moved = if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_mv(&set, &dest_folder) {
Ok(_) => {
res = DC_SUCCESS;
true
}
Err(err) => {
info!(
context,
0,
"Cannot move message, fallback to COPY/DELETE {}/{} to {}: {}",
folder.as_ref(),
uid,
dest_folder.as_ref(),
err
);
false
}
}
} else {
unreachable!();
};
if !moved {
let copied = if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_copy(&set, &dest_folder) {
Ok(_) => true,
Err(err) => {
eprintln!("error copy: {:?}", err);
info!(context, 0, "Cannot copy message.",);
false
}
}
} else {
unreachable!();
};
if copied {
if self.add_flag(context, uid, "\\Deleted") == 0 {
warn!(context, 0, "Cannot mark message as \"Deleted\".",);
}
self.config.write().unwrap().selected_folder_needs_expunge = true;
res = DC_SUCCESS;
}
ImapResult::RetryLater
};
}
let set = format!("{}", uid);
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_mv(&set, &dest_folder) {
Ok(_) => {
*dest_uid = 0;
return ImapResult::Success;
}
Err(err) => {
info!(
context,
0,
"Cannot move message, fallback to COPY/DELETE {}/{} to {}: {}",
folder.as_ref(),
uid,
dest_folder.as_ref(),
err
);
}
}
}
} else {
unreachable!();
};
if res == DC_SUCCESS {
// TODO: is this correct?
*dest_uid = uid;
}
if res == DC_RETRY_LATER {
if self.should_reconnect() {
DC_RETRY_LATER
} else {
DC_FAILED
// message was NOT moved, let's try copy
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_copy(&set, &dest_folder) {
Ok(_) => {}
Err(err) => {
info!(context, 0, "Cannot copy message. {:?}", err);
return ImapResult::Failed;
}
}
} else {
res
unreachable!();
};
if self.add_flag(context, uid, "\\Deleted") {
self.config.write().unwrap().selected_folder_needs_expunge = true;
return ImapResult::Success;
}
warn!(context, 0, "Cannot mark message as \"Deleted\".",);
return ImapResult::Failed;
}
fn add_flag<S: AsRef<str>>(&self, context: &Context, server_uid: u32, flag: S) -> usize {
fn add_flag<S: AsRef<str>>(&self, context: &Context, server_uid: u32, flag: S) -> bool {
if server_uid == 0 {
return 0;
return false;
}
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
let set = format!("{}", server_uid);
let query = format!("+FLAGS ({})", flag.as_ref());
match session.uid_store(&set, &query) {
Ok(_) => {}
Ok(_) => {
return true;
}
Err(err) => {
warn!(
context,
@@ -1282,22 +1278,21 @@ impl Imap {
}
}
}
// All non-connection states are treated as success - the mail may
// already be deleted or moved away on the server.
if self.should_reconnect() {
0
} else {
1
}
false
}
pub fn set_seen<S: AsRef<str>>(&self, context: &Context, folder: S, uid: u32) -> usize {
let mut res = DC_RETRY_LATER;
pub fn set_seen<S: AsRef<str>>(&self, context: &Context, folder: S, uid: u32) -> ImapResult {
if uid == 0 {
res = DC_FAILED
} else if self.is_connected() {
info!(
context,
0,
"set_seen folder={} uid={}",
folder.as_ref(),
uid
);
return ImapResult::Failed;
}
if self.is_connected() {
info!(
context,
0,
@@ -1306,226 +1301,210 @@ impl Imap {
uid,
);
if self.select_folder(context, Some(folder.as_ref())) == 0 {
if !self.select_folder(context, Some(folder.as_ref())) {
warn!(
context,
0,
"Cannot select folder {} for setting SEEN flag.",
folder.as_ref(),
);
} else if self.add_flag(context, uid, "\\Seen") == 0 {
} else if !self.add_flag(context, uid, "\\Seen") {
warn!(context, 0, "Cannot mark message as seen.",);
} else {
res = DC_SUCCESS
}
}
if res == DC_RETRY_LATER {
if self.should_reconnect() {
DC_RETRY_LATER
} else {
DC_FAILED
}
} else {
res
}
}
pub fn set_mdnsent<S: AsRef<str>>(&self, context: &Context, folder: S, uid: u32) -> usize {
// returns 0=job should be retried later, 1=job done, 2=job done and flag just set
let mut res = DC_RETRY_LATER;
let set = format!("{}", uid);
if uid == 0 {
res = DC_FAILED;
} else if self.is_connected() {
info!(
context,
0,
"Marking message {}/{} as $MDNSent...",
folder.as_ref(),
uid,
);
if self.select_folder(context, Some(folder.as_ref())) == 0 {
warn!(
info!(
context,
0,
"Cannot select folder {} for setting $MDNSent flag.",
folder.as_ref()
"success marking message {}/{} as seen...",
folder.as_ref(),
uid,
);
} else {
// Check if the folder can handle the `$MDNSent` flag (see RFC 3503). If so, and not
// set: set the flags and return this information.
// If the folder cannot handle the `$MDNSent` flag, we risk duplicated MDNs; it's up
// to the receiving MUA to handle this then (eg. Delta Chat has no problem with this).
let can_create_flag = self
.config
.read()
.unwrap()
.selected_mailbox
.as_ref()
.map(|mbox| {
// empty means, everything can be stored
mbox.permanent_flags.is_empty()
|| mbox
.permanent_flags
.iter()
.find(|flag| match flag {
imap::types::Flag::Custom(s) => s == "$MDNSent",
_ => false,
})
.is_some()
})
.expect("just selected folder");
if can_create_flag {
let fetched_msgs =
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_fetch(set, FETCH_FLAGS) {
Ok(res) => Some(res),
Err(err) => {
eprintln!("fetch error: {:?}", err);
None
}
}
} else {
unreachable!();
};
if let Some(msgs) = fetched_msgs {
let flag_set = msgs
.first()
.map(|msg| {
msg.flags()
.iter()
.find(|flag| match flag {
imap::types::Flag::Custom(s) => s == "$MDNSent",
_ => false,
})
.is_some()
})
.unwrap_or_else(|| false);
res = if flag_set {
DC_ALREADY_DONE
} else if self.add_flag(context, uid, "$MDNSent") != 0 {
DC_SUCCESS
} else {
res
};
if res == DC_SUCCESS {
info!(context, 0, "$MDNSent just set and MDN will be sent.");
} else {
info!(context, 0, "$MDNSent already set and MDN already sent.");
}
}
} else {
res = DC_SUCCESS;
info!(
context,
0, "Cannot store $MDNSent flags, risk sending duplicate MDN.",
);
}
return ImapResult::Success;
}
}
if res == DC_RETRY_LATER {
if self.should_reconnect() {
DC_RETRY_LATER
} else {
DC_FAILED
}
if self.should_reconnect() {
ImapResult::RetryLater
} else {
res
ImapResult::Failed
}
}
pub fn set_mdnsent<S: AsRef<str>>(&self, context: &Context, folder: S, uid: u32) -> ImapResult {
if uid == 0 {
return ImapResult::Failed;
}
if !self.is_connected() {
return ImapResult::RetryLater;
}
info!(
context,
0,
"Marking message {}/{} as $MDNSent...",
folder.as_ref(),
uid,
);
if !self.select_folder(context, Some(folder.as_ref())) {
warn!(
context,
0,
"Cannot select folder {} for setting $MDNSent flag.",
folder.as_ref()
);
return ImapResult::Failed;
}
// Check if the folder can handle the `$MDNSent` flag (see RFC 3503). If so, and not
// set: set the flags and return this information.
// If the folder cannot handle the `$MDNSent` flag, we risk duplicated MDNs; it's up
// to the receiving MUA to handle this then (eg. Delta Chat has no problem with this).
let can_create_flag = self
.config
.read()
.unwrap()
.selected_mailbox
.as_ref()
.map(|mbox| {
// empty means, everything can be stored
mbox.permanent_flags.is_empty()
|| mbox
.permanent_flags
.iter()
.find(|flag| match flag {
imap::types::Flag::Custom(s) => s == "$MDNSent",
_ => false,
})
.is_some()
})
.expect("just selected folder");
if !can_create_flag {
info!(
context,
0, "Cannot store $MDNSent flags, ignoring to prevent duplicate MDN.",
);
return ImapResult::Success;
}
let set = format!("{}", uid);
let fetched_msgs = if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_fetch(set, FETCH_FLAGS) {
Ok(res) => res,
Err(err) => {
eprintln!("fetch error: {:?}", err);
return ImapResult::Failed;
}
}
} else {
unreachable!();
};
let flag_set = fetched_msgs
.first()
.map(|msg| {
msg.flags()
.iter()
.find(|flag| match flag {
imap::types::Flag::Custom(s) => s == "$MDNSent",
_ => false,
})
.is_some()
})
.unwrap_or_else(|| false);
if flag_set {
info!(context, 0, "$MDNSent already set and MDN already sent.");
ImapResult::AlreadyDone
} else if self.add_flag(context, uid, "$MDNSent") {
info!(context, 0, "$MDNSent just set and MDN will be sent.");
ImapResult::Success
} else if self.should_reconnect() {
ImapResult::RetryLater
} else {
ImapResult::Failed
}
}
// only returns 0 on connection problems; we should try later again in this case *
pub fn delete_msg<S1: AsRef<str>, S2: AsRef<str>>(
&self,
context: &Context,
message_id: S1,
folder: S2,
server_uid: &mut u32,
) -> usize {
let mut success = false;
) -> ImapResult {
if *server_uid == 0 {
success = true
} else {
info!(
return ImapResult::Failed;
}
if !self.is_connected() {
return ImapResult::RetryLater;
}
info!(
context,
0,
"Marking message \"{}\", {}/{} for deletion...",
message_id.as_ref(),
folder.as_ref(),
server_uid,
);
if !self.select_folder(context, Some(&folder)) {
warn!(
context,
0,
"Marking message \"{}\", {}/{} for deletion...",
message_id.as_ref(),
folder.as_ref(),
server_uid,
"Cannot select folder {} for deleting message.",
folder.as_ref()
);
if self.select_folder(context, Some(&folder)) == 0 {
warn!(
context,
0,
"Cannot select folder {} for deleting message.",
folder.as_ref()
);
} else {
let set = format!("{}", server_uid);
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_fetch(set, PREFETCH_FLAGS) {
Ok(msgs) => {
if msgs.is_empty()
|| msgs
.first()
.unwrap()
.envelope()
.expect("missing envelope")
.message_id
.expect("missing message id")
!= message_id.as_ref()
{
warn!(
context,
0,
"Cannot delete on IMAP, {}/{} does not match {}.",
folder.as_ref(),
server_uid,
message_id.as_ref(),
);
*server_uid = 0;
}
}
Err(err) => {
eprintln!("fetch error: {:?}", err);
warn!(
context,
0,
"Cannot delete on IMAP, {}/{} not found.",
folder.as_ref(),
server_uid,
);
*server_uid = 0;
}
return ImapResult::RetryLater;
}
let set = format!("{}", server_uid);
if let Some(ref mut session) = &mut *self.session.lock().unwrap() {
match session.uid_fetch(set, PREFETCH_FLAGS) {
Ok(msgs) => {
if msgs.is_empty()
|| msgs
.first()
.unwrap()
.envelope()
.expect("missing envelope")
.message_id
.expect("missing message id")
!= message_id.as_ref()
{
warn!(
context,
0,
"Cannot delete on IMAP, {}/{} does not match {}.",
folder.as_ref(),
server_uid,
message_id.as_ref(),
);
*server_uid = 0;
return ImapResult::Failed;
}
}
// mark the message for deletion
if self.add_flag(context, *server_uid, "\\Deleted") == 0 {
warn!(context, 0, "Cannot mark message as \"Deleted\".");
} else {
self.config.write().unwrap().selected_folder_needs_expunge = true;
success = true
Err(err) => {
warn!(
context,
0,
"Cannot delete on IMAP, {}/{}, fetch error {:?}.",
folder.as_ref(),
server_uid,
err
);
*server_uid = 0;
return ImapResult::Failed;
}
}
}
if success {
1
// mark the message for deletion
if !self.add_flag(context, *server_uid, "\\Deleted") {
warn!(context, 0, "Cannot mark message as \"Deleted\".");
*server_uid = 0;
ImapResult::Failed
} else {
self.is_connected() as usize
self.config.write().unwrap().selected_folder_needs_expunge = true;
ImapResult::Success
}
}

View File

@@ -1,5 +1,4 @@
use std::ffi::CStr;
use std::ptr;
use std::time::Duration;
use deltachat_derive::{FromSql, ToSql};
@@ -82,7 +81,7 @@ pub struct Job {
pub added_timestamp: i64,
pub tries: i32,
pub param: Params,
pub try_again: i32,
pub try_again: Delay,
pub pending_error: Option<String>,
}
@@ -111,167 +110,132 @@ impl Job {
#[allow(non_snake_case)]
fn do_DC_JOB_SEND(&mut self, context: &Context) {
let ok_to_continue;
let mut filename = ptr::null_mut();
let mut buf = ptr::null_mut();
let mut buf_bytes = 0;
/* connect to SMTP server, if not yet done */
if !context.smtp.lock().unwrap().is_connected() {
let loginparam = dc_loginparam_read(context, &context.sql, "configured_");
let connected = context.smtp.lock().unwrap().connect(context, &loginparam);
if !connected {
self.try_again_later(3i32, None);
ok_to_continue = false;
} else {
ok_to_continue = true;
self.try_again_later(Delay::Standard, None);
return;
}
} else {
ok_to_continue = true;
}
if ok_to_continue {
let filename_s = self.param.get(Param::File).unwrap_or_default();
filename = unsafe { filename_s.strdup() };
if unsafe { strlen(filename) } == 0 {
warn!(context, 0, "Missing file name for job {}", self.job_id,);
} else if 0 != unsafe { dc_read_file(context, filename, &mut buf, &mut buf_bytes) } {
let recipients = self.param.get(Param::Recipients);
if recipients.is_none() {
warn!(context, 0, "Missing recipients for job {}", self.job_id,);
} else {
let recipients_list = recipients
.unwrap()
.split("\x1e")
.filter_map(|addr| match lettre::EmailAddress::new(addr.to_string()) {
Ok(addr) => Some(addr),
Err(err) => {
eprintln!("WARNING: invalid recipient: {} {:?}", addr, err);
None
}
})
.collect::<Vec<_>>();
/* if there is a msg-id and it does not exist in the db, cancel sending.
this happens if dc_delete_msgs() was called
before the generated mime was sent out */
let ok_to_continue1;
if 0 != self.foreign_id {
if 0 == unsafe { dc_msg_exists(context, self.foreign_id) } {
warn!(
context,
0,
"Message {} for job {} does not exist",
self.foreign_id,
self.job_id,
);
ok_to_continue1 = false;
} else {
ok_to_continue1 = true;
}
} else {
ok_to_continue1 = true;
}
if ok_to_continue1 {
/* send message */
let body = unsafe {
std::slice::from_raw_parts(buf as *const u8, buf_bytes).to_vec()
};
let filename = self.param.get(Param::File).unwrap_or_default();
let body = match dc_read_file_safe(context, filename) {
Some(bytes) => bytes,
None => {
warn!(context, 0, "job {} error", self.job_id);
return;
}
};
// hold the smtp lock during sending of a job and
// its ok/error response processing. Note that if a message
// was sent we need to mark it in the database as we
// otherwise might send it twice.
let mut sock = context.smtp.lock().unwrap();
if 0 == sock.send(context, recipients_list, body) {
sock.disconnect();
self.try_again_later(-1i32, Some(as_str(sock.error)));
} else {
dc_delete_file(context, filename_s);
if 0 != self.foreign_id {
dc_update_msg_state(
context,
self.foreign_id,
MessageState::OutDelivered,
);
let chat_id: i32 = context
.sql
.query_row_col(
context,
"SELECT chat_id FROM msgs WHERE id=?",
params![self.foreign_id as i32],
0,
)
.unwrap_or_default();
context.call_cb(
Event::MSG_DELIVERED,
chat_id as uintptr_t,
self.foreign_id as uintptr_t,
);
}
}
}
let recipients = self.param.get(Param::Recipients);
if recipients.is_none() {
error!(context, 0, "Missing recipients for job {}", self.job_id,);
return;
}
let recipients_list = recipients
.unwrap()
.split("\x1e")
.filter_map(|addr| match lettre::EmailAddress::new(addr.to_string()) {
Ok(addr) => Some(addr),
Err(err) => {
eprintln!("WARNING: invalid recipient: {} {:?}", addr, err);
None
}
})
.collect::<Vec<_>>();
/* if there is a msg-id and it does not exist in the db, cancel sending.
this happens if dc_delete_msgs() was called
before the generated mime was sent out */
if 0 != self.foreign_id {
if !dc_msg_exists(context, self.foreign_id) {
warn!(
context,
0, "Message {} for job {} does not exist", self.foreign_id, self.job_id,
);
return;
}
}
unsafe { free(buf) };
unsafe { free(filename.cast()) };
/* send message while holding the smtp lock long enough
to also mark success in the database, to reduce chances
of a message getting sent twice.
*/
let mut sock = context.smtp.lock().unwrap();
if 0 == sock.send(context, recipients_list, body) {
sock.disconnect();
self.try_again_later(Delay::AtOnce, Some(as_str(sock.error)));
return;
}
dc_delete_file(context, filename);
if 0 != self.foreign_id {
dc_update_msg_state(context, self.foreign_id, MessageState::OutDelivered);
let chat_id: i32 = context
.sql
.query_row_col(
context,
"SELECT chat_id FROM msgs WHERE id=?",
params![self.foreign_id as i32],
0,
)
.unwrap_or_default();
context.call_cb(
Event::MSG_DELIVERED,
chat_id as uintptr_t,
self.foreign_id as uintptr_t,
);
}
}
// this value does not increase the number of tries
fn try_again_later(&mut self, try_again: libc::c_int, pending_error: Option<&str>) {
fn try_again_later(&mut self, try_again: Delay, pending_error: Option<&str>) {
self.try_again = try_again;
self.pending_error = pending_error.map(|s| s.to_string());
}
#[allow(non_snake_case)]
fn do_DC_JOB_MOVE_MSG(&mut self, context: &Context) {
let ok_to_continue;
let mut dest_uid = 0;
let inbox = context.inbox.read().unwrap();
if !inbox.is_connected() {
connect_to_inbox(context, &inbox);
if !inbox.is_connected() {
self.try_again_later(3, None);
ok_to_continue = false;
} else {
ok_to_continue = true;
}
} else {
ok_to_continue = true;
if !connect_to_inbox(context, &inbox) {
self.try_again_later(Delay::Standard, None);
return;
}
if ok_to_continue {
if let Ok(msg) = dc_msg_load_from_db(context, self.foreign_id) {
if context
.sql
.get_config_int(context, "folders_configured")
.unwrap_or_default()
< 3
{
inbox.configure_folders(context, 0x1i32);
}
let dest_folder = context.sql.get_config(context, "configured_mvbox_folder");
if let Ok(msg) = dc_msg_load_from_db(context, self.foreign_id) {
if context
.sql
.get_config_int(context, "folders_configured")
.unwrap_or_default()
< 3
{
inbox.configure_folders(context, 0x1i32);
}
let dest_folder = context.sql.get_config(context, "configured_mvbox_folder");
if let Some(dest_folder) = dest_folder {
let server_folder = msg.server_folder.as_ref().unwrap();
if let Some(dest_folder) = dest_folder {
let server_folder = msg.server_folder.as_ref().unwrap();
let mut dest_uid = 0;
match inbox.mv(
context,
server_folder,
msg.server_uid,
&dest_folder,
&mut dest_uid,
) as libc::c_uint
{
1 => {
self.try_again_later(3i32, None);
}
3 => {
dc_update_server_uid(context, msg.rfc724_mid, &dest_folder, dest_uid);
}
0 | 2 | _ => {}
match inbox.mv(
context,
server_folder,
msg.server_uid,
&dest_folder,
&mut dest_uid,
) {
ImapResult::RetryLater => {
self.try_again_later(Delay::Standard, None);
}
ImapResult::Success => {
// TODO: dest_uid is not (yet) set by mv() so remains 0
dc_update_server_uid(
context,
as_str(msg.rfc724_mid),
&dest_folder,
dest_uid,
);
}
_ => {}
}
}
}
@@ -279,53 +243,34 @@ impl Job {
#[allow(non_snake_case)]
fn do_DC_JOB_DELETE_MSG_ON_IMAP(&mut self, context: &Context) {
let mut delete_from_server = 1;
let inbox = context.inbox.read().unwrap();
if let Ok(mut msg) = dc_msg_load_from_db(context, self.foreign_id) {
if !(msg.rfc724_mid.is_null()
|| unsafe { *msg.rfc724_mid.offset(0isize) as libc::c_int == 0 })
{
let ok_to_continue1;
/* eg. device messages have no Message-ID */
if dc_rfc724_mid_cnt(context, msg.rfc724_mid) != 1 {
info!(
context,
0, "The message is deleted from the server when all parts are deleted.",
);
delete_from_server = 0i32
return;
}
/* if this is the last existing part of the message, we delete the message from the server */
if 0 != delete_from_server {
let ok_to_continue;
if !inbox.is_connected() {
connect_to_inbox(context, &inbox);
if !inbox.is_connected() {
self.try_again_later(3i32, None);
ok_to_continue = false;
} else {
ok_to_continue = true;
}
} else {
ok_to_continue = true;
}
if ok_to_continue {
let mid = unsafe { CStr::from_ptr(msg.rfc724_mid).to_str().unwrap() };
let server_folder = msg.server_folder.as_ref().unwrap();
if 0 == inbox.delete_msg(context, mid, server_folder, &mut msg.server_uid) {
self.try_again_later(-1i32, None);
ok_to_continue1 = false;
} else {
ok_to_continue1 = true;
}
} else {
ok_to_continue1 = false;
}
} else {
ok_to_continue1 = true;
if !connect_to_inbox(context, &inbox) {
self.try_again_later(Delay::Standard, None);
return;
}
if ok_to_continue1 {
dc_delete_msg_from_db(context, msg.id);
let mid = unsafe { CStr::from_ptr(msg.rfc724_mid).to_str().unwrap() };
let server_folder = msg.server_folder.as_ref().unwrap();
match inbox.delete_msg(context, mid, server_folder, &mut msg.server_uid) {
ImapResult::RetryLater => {
self.try_again_later(Delay::AtOnce, None);
}
_ => {
dc_delete_msg_from_db(context, msg.id);
}
}
}
}
@@ -333,57 +278,71 @@ impl Job {
#[allow(non_snake_case)]
fn do_DC_JOB_MARKSEEN_MSG_ON_IMAP(&mut self, context: &Context) {
let ok_to_continue;
let inbox = context.inbox.read().unwrap();
if !inbox.is_connected() {
connect_to_inbox(context, &inbox);
if !inbox.is_connected() {
self.try_again_later(3i32, None);
ok_to_continue = false;
} else {
ok_to_continue = true;
}
} else {
ok_to_continue = true;
if !connect_to_inbox(context, &inbox) {
self.try_again_later(Delay::Standard, None);
return;
}
if ok_to_continue {
if let Ok(msg) = dc_msg_load_from_db(context, self.foreign_id) {
let server_folder = msg.server_folder.as_ref().unwrap();
match inbox.set_seen(context, server_folder, msg.server_uid) as libc::c_uint {
0 => {}
1 => {
self.try_again_later(3i32, None);
}
_ => {
if 0 != msg.param.get_int(Param::WantsMdn).unwrap_or_default()
&& 0 != context
.sql
.get_config_int(context, "mdns_enabled")
.unwrap_or_else(|| 1)
{
let folder = msg.server_folder.as_ref().unwrap();
match inbox.set_mdnsent(context, folder, msg.server_uid) as libc::c_uint
{
1 => {
self.try_again_later(3i32, None);
}
3 => {
send_mdn(context, msg.id);
}
0 | 2 | _ => {}
}
}
}
if let Ok(msg) = dc_msg_load_from_db(context, self.foreign_id) {
if msg.server_uid == 0 {
// the message has likely been moved
// try again later
self.try_again_later(Delay::Standard, None);
return;
}
let server_folder = msg.server_folder.as_ref().unwrap();
info!(context, 0, "job_markseen_msg db id={}", self.foreign_id);
match inbox.set_seen(context, server_folder, msg.server_uid) {
ImapResult::Failed => {
info!(context, 0, "job_markseen_msg failed");
return;
}
ImapResult::RetryLater => {
info!(context, 0, "job_markseen_msg retry-later");
self.try_again_later(Delay::Standard, None);
return;
}
_ => {}
};
info!(
context,
0,
"set_mdnsent wantsmdn={} mdns_enabled={}",
msg.param.get_int(Param::WantsMdn).unwrap_or_default(),
context
.sql
.get_config_int(context, "mdns_enabled")
.unwrap_or_else(|| 1)
);
/* XXX
if 0 != msg.param.get_int(Param::WantsMdn).unwrap_or_default()
&& 0 != context
.sql
.get_config_int(context, "mdns_enabled")
.unwrap_or_else(|| 1)
*/
{
let folder = msg.server_folder.as_ref().unwrap();
match inbox.set_mdnsent(context, folder, msg.server_uid) {
ImapResult::RetryLater => {
self.try_again_later(Delay::Standard, None);
}
ImapResult::Success => {
send_mdn(context, msg.id);
}
ImapResult::AlreadyDone => {}
ImapResult::Failed => {}
};
}
}
}
#[allow(non_snake_case)]
fn do_DC_JOB_MARKSEEN_MDN_ON_IMAP(&mut self, context: &Context) {
let ok_to_continue;
let folder = self
.param
.get(Param::ServerFolder)
@@ -393,37 +352,37 @@ impl Job {
let mut dest_uid = 0;
let inbox = context.inbox.read().unwrap();
if !inbox.is_connected() {
connect_to_inbox(context, &inbox);
if !inbox.is_connected() {
self.try_again_later(3, None);
ok_to_continue = false;
} else {
ok_to_continue = true;
}
} else {
ok_to_continue = true;
if !connect_to_inbox(context, &inbox) {
self.try_again_later(Delay::Standard, None);
return;
}
if ok_to_continue {
if inbox.set_seen(context, &folder, uid) == 0 {
self.try_again_later(3i32, None);
match inbox.set_seen(context, &folder, uid) {
ImapResult::RetryLater => {
self.try_again_later(Delay::Standard, None);
return;
}
if 0 != self.param.get_int(Param::AlsoMove).unwrap_or_default() {
if context
.sql
.get_config_int(context, "folders_configured")
.unwrap_or_default()
< 3
{
inbox.configure_folders(context, 0x1i32);
}
let dest_folder = context.sql.get_config(context, "configured_mvbox_folder");
if let Some(dest_folder) = dest_folder {
if 1 == inbox.mv(context, folder, uid, dest_folder, &mut dest_uid)
as libc::c_uint
{
self.try_again_later(3, None);
ImapResult::Failed => {
return;
}
_ => {}
};
if 0 != self.param.get_int(Param::AlsoMove).unwrap_or_default() {
if context
.sql
.get_config_int(context, "folders_configured")
.unwrap_or_default()
< 3
{
inbox.configure_folders(context, 0x1i32);
}
let dest_folder = context.sql.get_config(context, "configured_mvbox_folder");
if let Some(dest_folder) = dest_folder {
match inbox.mv(context, folder, uid, dest_folder, &mut dest_uid) {
ImapResult::RetryLater => {
self.try_again_later(Delay::Standard, None);
}
_ => {}
}
}
}
@@ -445,7 +404,7 @@ pub fn perform_imap_fetch(context: &Context) {
let inbox = context.inbox.read().unwrap();
let start = std::time::Instant::now();
if 0 == connect_to_inbox(context, &inbox) {
if !connect_to_inbox(context, &inbox) {
return;
}
if context
@@ -650,6 +609,21 @@ pub fn maybe_network(context: &Context) {
interrupt_sentbox_idle(context);
}
/// Reschedules all pending jobs belonging to the given message to run at once.
///
/// Sets `desired_timestamp` of every row in `jobs` whose `foreign_id` matches
/// `msg_id` to the current time, so the job loop picks them up on its next pass.
///
/// Returns `true` if the UPDATE executed successfully, `false` on a database
/// error (the error is logged, not propagated).
pub fn job_retry_msg_actions_at_once(context: &Context, msg_id: uint32_t) -> bool {
    match sql::execute(
        context,
        &context.sql,
        "UPDATE jobs SET desired_timestamp=? WHERE foreign_id=?;",
        params![time(), msg_id],
    ) {
        Ok(_) => true,
        // Bind as `err` (not `_err`): the value *is* used in the log line below,
        // and an underscore prefix would wrongly signal an intentionally unused
        // binding to readers and to the `unused_variables` lint convention.
        Err(err) => {
            info!(context, 0, "job-retry-at-once failed {:?}", err);
            false
        }
    }
}
pub fn job_action_exists(context: &Context, action: Action) -> bool {
context
.sql
@@ -811,7 +785,7 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
added_timestamp: row.get(4)?,
tries: row.get(6)?,
param: row.get::<_, String>(3)?.parse().unwrap_or_default(),
try_again: 0,
try_again: Delay::Default,
pending_error: None,
};
@@ -843,7 +817,7 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
// some configuration jobs are "exclusive":
// - they are always executed in the imap-thread and the smtp-thread is suspended during execution
    // - they may change the database handle; we do not keep old pointers therefore
// - they can be re-executed one time AT_ONCE, but they are not save in the database for later execution
    // - they can be re-executed one time AtOnce, but they are not saved in the database for later execution
if Action::ConfigureImap == job.action || Action::ImexImap == job.action {
job_kill_action(context, job.action);
&context
@@ -863,8 +837,8 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
let mut tries = 0;
while tries <= 1 {
// this can be modified by a job using dc_job_try_again_later()
job.try_again = 0;
// this can be modified by a job using try_again_later()
job.try_again = Delay::Default;
match job.action {
Action::SendMsgToSmtp => job.do_DC_JOB_SEND(context),
@@ -885,7 +859,7 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
Action::SendMdnOld => {}
Action::SendMsgToSmtpOld => {}
}
if job.try_again != -1 {
if job.try_again != Delay::AtOnce {
break;
}
tries += 1
@@ -905,7 +879,7 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
.unsuspend(context);
suspend_smtp_thread(context, false);
break;
} else if job.try_again == 2 {
} else if job.try_again == Delay::IncreationPoll {
// just try over next loop unconditionally, the ui typically interrupts idle when the file (video) is ready
info!(
context,
@@ -918,7 +892,7 @@ fn job_perform(context: &Context, thread: Thread, probe_network: bool) {
},
job.job_id
);
} else if job.try_again == -1 || job.try_again == 3 {
} else if job.try_again == Delay::AtOnce || job.try_again == Delay::Standard {
let tries = job.tries + 1;
if tries < 17 {
job.tries = tries;
@@ -994,15 +968,21 @@ fn suspend_smtp_thread(context: &Context, suspend: bool) {
}
}
fn connect_to_inbox(context: &Context, inbox: &Imap) -> libc::c_int {
let ret_connected = dc_connect_to_configured_imap(context, inbox);
if 0 != ret_connected {
inbox.set_watch_folder("INBOX".into());
fn connect_to_inbox(context: &Context, inbox: &Imap) -> bool {
if inbox.is_connected() {
true
} else {
let ret_connected = dc_connect_to_configured_imap(context, inbox);
if 0 != ret_connected {
inbox.set_watch_folder("INBOX".into());
true
} else {
false
}
}
ret_connected
}
fn send_mdn(context: &Context, msg_id: uint32_t) {
pub fn send_mdn(context: &Context, msg_id: uint32_t) {
if let Ok(mut mimefactory) = unsafe { dc_mimefactory_load_mdn(context, msg_id) } {
if 0 != unsafe { dc_mimefactory_render(&mut mimefactory) } {
add_smtp_job(context, Action::SendMdn, &mut mimefactory);

View File

@@ -94,6 +94,7 @@ pub enum LotState {
MsgInFresh = 10,
MsgInNoticed = 13,
MsgInSeen = 16,
MsgInMDNSent = 17,
MsgOutPreparing = 18,
MsgOutDraft = 19,
MsgOutPending = 20,

View File

@@ -30,6 +30,7 @@ pub enum MessageState {
InFresh = 10,
InNoticed = 13,
InSeen = 16,
InMDNSent = 17,
OutPreparing = 18,
OutDraft = 19,
OutPending = 20,
@@ -46,6 +47,7 @@ impl From<MessageState> for LotState {
InFresh => LotState::MsgInFresh,
InNoticed => LotState::MsgInNoticed,
InSeen => LotState::MsgInSeen,
InMDNSent => LotState::MsgInMDNSent,
OutPreparing => LotState::MsgOutPreparing,
OutDraft => LotState::MsgOutDraft,
OutPending => LotState::MsgOutPending,
@@ -1033,39 +1035,32 @@ The value is also used for CC:-summaries */
// Context functions to work with messages
pub unsafe fn dc_msg_exists(context: &Context, msg_id: u32) -> libc::c_int {
if msg_id <= 9 {
return 0;
}
pub fn dc_msg_exists(context: &Context, msg_id: u32) -> bool {
if msg_id > DC_MSG_ID_LAST_SPECIAL {
let chat_id: Option<i32> = context.sql.query_row_col(
context,
"SELECT chat_id FROM msgs WHERE id=?;",
params![msg_id as i32],
0,
);
let chat_id: Option<i32> = context.sql.query_row_col(
context,
"SELECT chat_id FROM msgs WHERE id=?;",
params![msg_id as i32],
0,
);
if let Some(chat_id) = chat_id {
if chat_id != 3 {
return 1;
if let Some(chat_id) = chat_id {
if chat_id != 3 {
return true;
}
}
}
0
false
}
pub fn dc_update_msg_move_state(
context: &Context,
rfc724_mid: *const libc::c_char,
state: MoveState,
) -> bool {
pub fn dc_update_msg_move_state(context: &Context, rfc724_mid: &str, state: MoveState) -> bool {
// we update the move_state for all messages belonging to a given Message-ID
// so that the state stay intact when parts are deleted
sql::execute(
context,
&context.sql,
"UPDATE msgs SET move_state=? WHERE rfc724_mid=?;",
params![state as i32, as_str(rfc724_mid)],
params![state as i32, rfc724_mid],
)
.is_ok()
}
@@ -1243,6 +1238,29 @@ pub fn dc_rfc724_mid_cnt(context: &Context, rfc724_mid: *const libc::c_char) ->
}
}
/// Looks up a message by its RFC-724 Message-ID (`rfc724_mid`).
///
/// Returns `Some((msg_id, state, server_folder, server_uid))` for the first
/// matching row of the `msgs` table, or `None` when the id is empty or the
/// query returns no row / fails. The failure is logged at info level and not
/// propagated, so "not found" and "query error" are indistinguishable to the
/// caller.
pub fn dc_rfc724_mid_exists_with_msg_state(
    context: &Context,
    rfc724_mid: &str,
) -> Option<(u32, MessageState, String, u32)> {
    // e.g. device messages have no Message-ID at all — nothing to look up.
    if rfc724_mid.is_empty() {
        return None;
    }
    match context.sql.query_row(
        "SELECT id, state, server_folder, server_uid FROM msgs WHERE rfc724_mid=?",
        &[rfc724_mid],
        |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?)),
    ) {
        Ok(res) => Some(res),
        Err(_err) => {
            // NOTE(review): a "no rows" result also lands in this arm;
            // presumably that is the common, benign case rather than a real
            // error — confirm against callers before treating this log as a
            // failure signal.
            info!(
                context,
                0, "dc_rfc724_mid_exists_with_msg_state ERR={:?}", _err
            );
            None
        }
    }
}
pub fn dc_rfc724_mid_exists(
context: &Context,
rfc724_mid: *const libc::c_char,
@@ -1281,13 +1299,13 @@ pub fn dc_rfc724_mid_exists(
pub fn dc_update_server_uid(
context: &Context,
rfc724_mid: *const libc::c_char,
rfc724_mid: &str,
server_folder: impl AsRef<str>,
server_uid: u32,
) {
match context.sql.execute(
"UPDATE msgs SET server_folder=?, server_uid=? WHERE rfc724_mid=?;",
params![server_folder.as_ref(), server_uid, as_str(rfc724_mid)],
params![server_folder.as_ref(), server_uid, rfc724_mid],
) {
Ok(_) => {}
Err(err) => {