Compare commits

..

1 Commits

Author SHA1 Message Date
link2xt
959ca06691 python: fail fast on the tests
Do not waste CI time running the rest of the tests
if CI is not going to be green anyway.
2023-03-22 12:35:27 +00:00
66 changed files with 925 additions and 2401 deletions

View File

@@ -77,9 +77,6 @@ jobs:
- os: windows-latest
rust: 1.68.0
python: false # Python bindings compilation on Windows is not supported.
- os: macos-latest
rust: 1.68.0
python: 3.9
# Minimum Supported Rust Version = 1.64.0
#

View File

@@ -84,15 +84,35 @@ jobs:
contents: write
runs-on: "ubuntu-latest"
steps:
- name: Download built binaries
- name: Download deltachat-rpc-server-x86_64
uses: "actions/download-artifact@v3"
with:
name: "deltachat-rpc-server-x86_64"
path: "dist/deltachat-rpc-server-x86_64"
- name: Compose dist/ directory
run: |
mkdir dist
for x in x86_64 aarch64 armv7 win32.exe win64.exe; do
mv "deltachat-rpc-server-$x"/* "dist/deltachat-rpc-server-$x"
done
- name: Download deltachat-rpc-server-aarch64
uses: "actions/download-artifact@v3"
with:
name: "deltachat-rpc-server-aarch64"
path: "dist/deltachat-rpc-server-aarch64"
- name: Download deltachat-rpc-server-armv7
uses: "actions/download-artifact@v3"
with:
name: "deltachat-rpc-server-armv7"
path: "dist/deltachat-rpc-server-armv7"
- name: Download deltachat-rpc-server-win32.exe
uses: "actions/download-artifact@v3"
with:
name: "deltachat-rpc-server-win32.exe"
path: "dist/deltachat-rpc-server-win32.exe"
- name: Download deltachat-rpc-server-win64.exe
uses: "actions/download-artifact@v3"
with:
name: "deltachat-rpc-server-win64.exe"
path: "dist/deltachat-rpc-server-win64.exe"
- name: List downloaded artifacts
run: ls -l dist/
@@ -103,4 +123,4 @@ jobs:
run: |
gh release upload ${{ github.ref_name }} \
--repo ${{ github.repository }} \
dist/*
dist/deltachat-rpc-server-*

View File

@@ -1,88 +1,18 @@
# Changelog
## [1.112.9] - 2023-05-12
### Fixes
- Fetch at most 100 existing messages even if EXISTS was not received.
- Delete `smtp` rows when message sending is cancelled.
### Changes
- Improve SMTP logging.
## [1.112.8] - 2023-04-20
### Changes
- Add `get_http_response` JSON-RPC API.
- Add C API to get HTTP responses.
## [1.112.7] - 2023-04-17
### Fixes
- Updated `async-imap` to v0.8.0 to fix erroneous EOF detection in long IMAP responses.
## [1.112.6] - 2023-04-04
### Changes
- Add a device message after backup transfer #4301
### Fixed
- Updated `iroh` from 0.4.0 to 0.4.1 to fix transfer of large accounts with many blob files.
## [1.112.5] - 2023-04-02
### Fixes
- Run SQL database migrations after receiving a backup from the network. #4287
## [1.112.4] - 2023-03-31
### Fixes
- Fix call to `auditwheel` in `scripts/run_all.sh`.
## [1.112.3] - 2023-03-30
### Fixes
- `transfer::get_backup` now frees ongoing process when cancelled. #4249
## [1.112.2] - 2023-03-30
### Changes
- Update iroh, remove `default-net` from `[patch.crates-io]` section.
- transfer backup: Connect to multiple provider addresses concurrently. This should speed up connection time significantly on the getter side. #4240
- Make sure BackupProvider is cancelled on drop (or `dc_backup_provider_unref`). The BackupProvider will now always finish with an IMEX event of 1000 or 0, previously it would sometimes finished with 1000 (success) when it really was 0 (failure). #4242
### Fixes
- Do not return media from trashed messages in the "All media" view. #4247
## [1.112.1] - 2023-03-27
### Changes
- Add support for `--version` argument to `deltachat-rpc-server`. #4224
It can be used to check the installed version without starting the server.
### Fixes
- deltachat-rpc-client: fix bug in `Chat.send_message()`: invalid `MessageData` field `quotedMsg` instead of `quotedMsgId`
- `receive_imf`: Mark special messages as seen. Exactly: delivery reports, webxdc status updates. #4230
## [1.112.0] - 2023-03-23
## [Unreleased]
### Changes
- "full message view" not needed because of footers that go to contact status #4151
- Pick up system's light/dark mode in generated message HTML #4150
- Support non-persistent configuration with DELTACHAT_* env
- Print deltachat-repl errors with causes. #4166
- Increase MSRV to 1.64. #4167
- Core takes care of stopping and re-starting IO itself where needed,
e.g. during backup creation.
It is no longer needed to call `dc_stop_io()`.
`dc_start_io()` can now be called at any time without harm. #4138
- Pick up system's light/dark mode in generated message HTML. #4150
- More accurate `maybe_add_bcc_self` device message text. #4175
- "Full message view" not needed because of footers that go to contact status. #4151
- Support non-persistent configuration with `DELTACHAT_*` env. #4154
- Print deltachat-repl errors with causes. #4166
e.g. during backup creation. It is no longer needed to call
dc_stop_io(). dc_start_io() can now be called at any time without
harm. #4138
- More accurate maybe_add_bcc_self device message text #4175
### Fixes
- Fix segmentation fault if `dc_context_unref()` is called during
@@ -93,11 +23,6 @@
- Do not emit "Failed to run incremental vacuum" warnings on success. #4160
- Ability to send backup over network and QR code to setup second device #4007
- Disable buffering during STARTTLS setup. #4190
- Add `DC_EVENT_IMAP_INBOX_IDLE` event to wait until the account
is ready for testing.
It is used to fix race condition between fetching
existing messages and starting the test. #4208
## [1.111.0] - 2023-03-05
@@ -2389,12 +2314,5 @@ For a full list of changes, please see our closed Pull Requests:
https://github.com/deltachat/deltachat-core-rust/pulls?q=is%3Apr+is%3Aclosed
[unreleased]: https://github.com/deltachat/deltachat-core-rust/compare/v1.111.0...HEAD
[1.111.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.110.0...v1.111.0
[1.112.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.111.0...v1.112.0
[1.112.1]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.0...v1.112.1
[1.112.2]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.1...v1.112.2
[1.112.3]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.2...v1.112.3
[1.112.4]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.3...v1.112.4
[1.112.5]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.4...v1.112.5
[1.112.6]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.5...v1.112.6
[1.112.7]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.6...v1.112.7

506
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat"
version = "1.112.9"
version = "1.111.0"
edition = "2021"
license = "MPL-2.0"
rust-version = "1.64"
@@ -25,6 +25,7 @@ panic = 'abort'
opt-level = "z"
[patch.crates-io]
default-net = { git = "https://github.com/dignifiedquire/default-net.git", branch="feat-android" }
quinn-udp = { git = "https://github.com/quinn-rs/quinn", branch="main" }
quinn-proto = { git = "https://github.com/quinn-rs/quinn", branch="main" }
@@ -35,10 +36,10 @@ ratelimit = { path = "./deltachat-ratelimit" }
anyhow = "1"
async-channel = "1.8.0"
async-imap = { version = "0.8.0", default-features = false, features = ["runtime-tokio"] }
async-imap = { git = "https://github.com/async-email/async-imap", branch = "master", default-features = false, features = ["runtime-tokio"] }
async-native-tls = { version = "0.5", default-features = false, features = ["runtime-tokio"] }
async-smtp = { version = "0.9", default-features = false, features = ["runtime-tokio"] }
async_zip = { version = "0.0.11", default-features = false, features = ["deflate", "fs"] }
async_zip = { version = "0.0.9", default-features = false, features = ["deflate"] }
backtrace = "0.3"
base64 = "0.21"
bitflags = "1.3"
@@ -52,12 +53,12 @@ futures-lite = "1.12.0"
hex = "0.4.0"
humansize = "2"
image = { version = "0.24.5", default-features=false, features = ["gif", "jpeg", "ico", "png", "pnm", "webp", "bmp"] }
iroh = { version = "0.4.1", default-features = false }
# iroh = { version = "0.3.0", default-features = false }
iroh = { git = 'https://github.com/n0-computer/iroh', branch = "flub/ticket-multiple-addrs" }
kamadak-exif = "0.5"
lettre_email = { git = "https://github.com/deltachat/lettre", branch = "master" }
libc = "0.2"
mailparse = "0.14"
mime = "0.3.17"
num_cpus = "1.15"
num-derive = "0.3"
num-traits = "0.2"
@@ -84,11 +85,10 @@ strum_macros = "0.24"
tagger = "4.3.4"
textwrap = "0.16.0"
thiserror = "1"
tokio = { version = "1", features = ["fs", "rt-multi-thread", "macros"] }
tokio-io-timeout = "1.2.0"
tokio-stream = { version = "0.1.11", features = ["fs"] }
tokio-tar = { version = "0.3" } # TODO: integrate tokio into async-tar
tokio-util = "0.7.7"
tokio = { version = "1", features = ["fs", "rt-multi-thread", "macros"] }
toml = "0.7"
trust-dns-resolver = "0.22"
url = "2"

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat_ffi"
version = "1.112.9"
version = "1.111.0"
description = "Deltachat FFI"
edition = "2018"
readme = "README.md"
@@ -23,7 +23,7 @@ serde_json = "1.0"
tokio = { version = "1", features = ["rt-multi-thread"] }
anyhow = "1"
thiserror = "1"
rand = "0.8"
rand = "0.7"
once_cell = "1.17.0"
[features]

View File

@@ -25,7 +25,6 @@ typedef struct _dc_event dc_event_t;
typedef struct _dc_event_emitter dc_event_emitter_t;
typedef struct _dc_jsonrpc_instance dc_jsonrpc_instance_t;
typedef struct _dc_backup_provider dc_backup_provider_t;
typedef struct _dc_http_response dc_http_response_t;
// Alias for backwards compatibility, use dc_event_emitter_t instead.
typedef struct _dc_event_emitter dc_accounts_event_emitter_t;
@@ -73,6 +72,7 @@ typedef struct _dc_event_emitter dc_accounts_event_emitter_t;
*
* The example above uses "pthreads",
* however, you can also use anything else for thread handling.
* All deltachat-core functions, unless stated otherwise, are thread-safe.
*
* Now you can **configure the context:**
*
@@ -142,67 +142,6 @@ typedef struct _dc_event_emitter dc_accounts_event_emitter_t;
* ~~~
*
*
* ## Thread safety
*
* All deltachat-core functions, unless stated otherwise, are thread-safe.
* In particular, it is safe to pass the same dc_context_t pointer
* to multiple functions running concurrently in different threads.
*
* All the functions are guaranteed not to use the reference passed to them
* after returning. If the function spawns a long-running process,
* such as dc_configure() or dc_imex(), it will ensure that the objects
* passed to them are not deallocated as long as they are needed.
* For example, it is safe to call dc_imex(context, ...) and
* call dc_context_unref(context) immediately after return from dc_imex().
* It is however **not safe** to call dc_context_unref(context) concurrently
* until dc_imex() returns, because dc_imex() may have not increased
* the reference count of dc_context_t yet.
*
* This means that the context may be still in use after
* dc_context_unref() call.
* For example, it is possible to start the import/export process,
* call dc_context_unref(context) immediately after
* and observe #DC_EVENT_IMEX_PROGRESS events via the event emitter.
* Once dc_get_next_event() returns NULL,
* it is safe to terminate the application.
*
* It is recommended to create dc_context_t in the main thread
* and only call dc_context_unref() once other threads that may use it,
* such as the event loop thread, are terminated.
* Common mistake is to use dc_context_unref() as a way
* to cause dc_get_next_event() return NULL and terminate event loop this way.
* If event loop thread is inside a function taking dc_context_t
* as an argument at the moment dc_context_unref() is called on the main thread,
* the behavior is undefined.
*
* Recommended way to safely terminate event loop
* and shutdown the application is
* to use a boolean variable
* indicating that the event loop should stop
* and check it in the event loop thread
* every time before calling dc_get_next_event().
* To terminate the event loop, main thread should:
* 1. Notify event loop that it should terminate by atomically setting the
* boolean flag in the memory shared between the main thread and event loop.
* 2. Call dc_stop_io() or dc_accounts_stop_io(), depending
* on whether a single account or account manager is used.
* Stopping I/O is guaranteed to emit at least one event
* and interrupt the event loop even if it was blocked on dc_get_next_event().
* 3. Wait until the event loop thread notices the flag,
* exits the event loop and terminates.
* 4. Call dc_context_unref() or dc_accounts_unref().
* 5. Keep calling dc_get_next_event() in a loop until it returns NULL,
* indicating that the contexts are deallocated.
* 6. Terminate the application.
*
* When using C API via FFI in runtimes that use automatic memory management,
* such as CPython, JVM or Node.js, take care to ensure the correct
* shutdown order and avoid calling dc_context_unref() or dc_accounts_unref()
* on the objects still in use in other threads,
* e.g. by keeping a reference to the wrapper object.
* The details depend on the runtime being used.
*
*
* ## Class reference
*
* For a class reference, see the "Classes" link atop.
@@ -2776,12 +2715,6 @@ void dc_backup_provider_wait (dc_backup_provider_t* backup_provider);
/**
* Frees a dc_backup_provider_t object.
*
* If the provider has not yet finished, as indicated by
* dc_backup_provider_wait() or the #DC_EVENT_IMEX_PROGRESS event with value
* of 0 (failed) or 1000 (succeeded), this will also abort any in-progress
* transfer. If this aborts the provider a #DC_EVENT_IMEX_PROGRESS event with
* value 0 (failed) will be emitted.
*
* @memberof dc_backup_provider_t
* @param backup_provider The backup provider object as created by
* dc_backup_provider_new().
@@ -5058,72 +4991,6 @@ int dc_provider_get_status (const dc_provider_t* prov
void dc_provider_unref (dc_provider_t* provider);
/**
* Return an HTTP(S) GET response.
* This function can be used to download remote content for HTML emails.
*
* @memberof dc_context_t
* @param context The context object to take proxy settings from.
* @param url HTTP or HTTPS URL.
* @return The response must be released using dc_http_response_unref() after usage.
* NULL is returned on errors.
*/
dc_http_response_t* dc_get_http_response (const dc_context_t* context, const char* url);
/**
* @class dc_http_response_t
*
* An object containing an HTTP(S) GET response.
* Created by dc_get_http_response().
*/
/**
* Returns HTTP response MIME type as a string, e.g. "text/plain" or "text/html".
*
* @memberof dc_http_response_t
* @param response HTTP response as returned by dc_get_http_response().
* @return The string which must be released using dc_str_unref() after usage. May be NULL.
*/
char* dc_http_response_get_mimetype (const dc_http_response_t* response);
/**
* Returns HTTP response encoding, e.g. "utf-8".
*
* @memberof dc_http_response_t
* @param response HTTP response as returned by dc_get_http_response().
* @return The string which must be released using dc_str_unref() after usage. May be NULL.
*/
char* dc_http_response_get_encoding (const dc_http_response_t* response);
/**
* Returns HTTP response contents.
*
* @memberof dc_http_response_t
* @param response HTTP response as returned by dc_get_http_response().
* @return The blob which must be released using dc_str_unref() after usage. NULL is never returned.
*/
uint8_t* dc_http_response_get_blob (const dc_http_response_t* response);
/**
* Returns HTTP response content size.
*
* @memberof dc_http_response_t
* @param response HTTP response as returned by dc_get_http_response().
* @return The blob size.
*/
size_t dc_http_response_get_size (const dc_http_response_t* response);
/**
* Free an HTTP response object.
*
* @memberof dc_http_response_t
* @param response HTTP response as returned by dc_get_http_response().
*/
void dc_http_response_unref (const dc_http_response_t* response);
/**
* @class dc_lot_t
*
@@ -5601,6 +5468,7 @@ void dc_reactions_unref (dc_reactions_t* reactions);
*/
/**
* @class dc_jsonrpc_instance_t
*
@@ -5837,14 +5705,6 @@ void dc_event_unref(dc_event_t* event);
*/
#define DC_EVENT_IMAP_MESSAGE_MOVED 105
/**
* Emitted before going into IDLE on the Inbox folder.
*
* @param data1 0
* @param data2 0
*/
#define DC_EVENT_IMAP_INBOX_IDLE 106
/**
* Emitted when a new blob file was successfully written
*
@@ -7130,16 +6990,6 @@ void dc_event_unref(dc_event_t* event);
/// `%1$s` will be replaced by name and address of the contact.
#define DC_STR_PROTECTION_DISABLED_BY_OTHER 161
/// "Scan to set up second device for %1$s"
///
/// `%1$s` will be replaced by name and address of the account.
#define DC_STR_BACKUP_TRANSFER_QR 162
/// "Account transferred to your second device."
///
/// Used as a device message after a successful backup transfer.
#define DC_STR_BACKUP_TRANSFER_MSG_BODY 163
/**
* @}
*/

View File

@@ -31,7 +31,6 @@ use deltachat::ephemeral::Timer as EphemeralTimer;
use deltachat::imex::BackupProvider;
use deltachat::key::DcKey;
use deltachat::message::MsgId;
use deltachat::net::read_url_blob;
use deltachat::qr_code_generator::{generate_backup_qr, get_securejoin_qr_svg};
use deltachat::reaction::{get_msg_reactions, send_reaction, Reactions};
use deltachat::stock_str::StockMessage;
@@ -501,7 +500,6 @@ pub unsafe extern "C" fn dc_event_get_id(event: *mut dc_event_t) -> libc::c_int
EventType::SmtpMessageSent(_) => 103,
EventType::ImapMessageDeleted(_) => 104,
EventType::ImapMessageMoved(_) => 105,
EventType::ImapInboxIdle => 106,
EventType::NewBlobFile(_) => 150,
EventType::DeletedBlobFile(_) => 151,
EventType::Warning(_) => 300,
@@ -546,7 +544,6 @@ pub unsafe extern "C" fn dc_event_get_data1_int(event: *mut dc_event_t) -> libc:
| EventType::SmtpMessageSent(_)
| EventType::ImapMessageDeleted(_)
| EventType::ImapMessageMoved(_)
| EventType::ImapInboxIdle
| EventType::NewBlobFile(_)
| EventType::DeletedBlobFile(_)
| EventType::Warning(_)
@@ -597,7 +594,6 @@ pub unsafe extern "C" fn dc_event_get_data2_int(event: *mut dc_event_t) -> libc:
| EventType::SmtpMessageSent(_)
| EventType::ImapMessageDeleted(_)
| EventType::ImapMessageMoved(_)
| EventType::ImapInboxIdle
| EventType::NewBlobFile(_)
| EventType::DeletedBlobFile(_)
| EventType::Warning(_)
@@ -657,7 +653,6 @@ pub unsafe extern "C" fn dc_event_get_data2_str(event: *mut dc_event_t) -> *mut
EventType::MsgsChanged { .. }
| EventType::ReactionsChanged { .. }
| EventType::IncomingMsg { .. }
| EventType::ImapInboxIdle
| EventType::MsgsNoticed(_)
| EventType::MsgDelivered { .. }
| EventType::MsgFailed { .. }
@@ -4231,10 +4226,6 @@ pub unsafe extern "C" fn dc_backup_provider_wait(provider: *mut dc_backup_provid
#[no_mangle]
pub unsafe extern "C" fn dc_backup_provider_unref(provider: *mut dc_backup_provider_t) {
if provider.is_null() {
eprintln!("ignoring careless call to dc_backup_provider_unref()");
return;
}
drop(Box::from_raw(provider));
}
@@ -4249,28 +4240,17 @@ pub unsafe extern "C" fn dc_receive_backup(
}
let ctx = &*context;
let qr_text = to_string_lossy(qr);
receive_backup(ctx.clone(), qr_text)
}
// Because this is a long-running operation make sure we own the Context. This stops a FFI
// user from deallocating it by calling unref on the object while we are using it.
fn receive_backup(ctx: Context, qr_text: String) -> libc::c_int {
let qr = match block_on(qr::check_qr(&ctx, &qr_text))
.log_err(&ctx, "Invalid QR code")
.context("Invalid QR code")
.set_last_error(&ctx)
{
let qr = match block_on(qr::check_qr(ctx, &qr_text)).log_err(ctx, "Invalid QR code") {
Ok(qr) => qr,
Err(_) => return 0,
};
match block_on(imex::get_backup(&ctx, qr))
.log_err(&ctx, "Get backup failed")
.context("Get backup failed")
.set_last_error(&ctx)
{
Ok(_) => 1,
Err(_) => 0,
}
spawn(async move {
imex::get_backup(ctx, qr)
.await
.log_err(ctx, "Get backup failed")
.ok();
});
1
}
trait ResultExt<T, E> {
@@ -4468,93 +4448,6 @@ pub unsafe extern "C" fn dc_provider_unref(provider: *mut dc_provider_t) {
// this may change once we start localizing string.
}
// dc_http_response_t
pub type dc_http_response_t = net::HttpResponse;
#[no_mangle]
pub unsafe extern "C" fn dc_get_http_response(
context: *const dc_context_t,
url: *const libc::c_char,
) -> *mut dc_http_response_t {
if context.is_null() || url.is_null() {
eprintln!("ignoring careless call to dc_get_http_response()");
return ptr::null_mut();
}
let context = &*context;
let url = to_string_lossy(url);
if let Ok(response) = block_on(read_url_blob(context, &url)).log_err(context, "read_url_blob") {
Box::into_raw(Box::new(response))
} else {
ptr::null_mut()
}
}
#[no_mangle]
pub unsafe extern "C" fn dc_http_response_get_mimetype(
response: *const dc_http_response_t,
) -> *mut libc::c_char {
if response.is_null() {
eprintln!("ignoring careless call to dc_http_response_get_mimetype()");
return ptr::null_mut();
}
let response = &*response;
response.mimetype.strdup()
}
#[no_mangle]
pub unsafe extern "C" fn dc_http_response_get_encoding(
response: *const dc_http_response_t,
) -> *mut libc::c_char {
if response.is_null() {
eprintln!("ignoring careless call to dc_http_response_get_encoding()");
return ptr::null_mut();
}
let response = &*response;
response.encoding.strdup()
}
#[no_mangle]
pub unsafe extern "C" fn dc_http_response_get_blob(
response: *const dc_http_response_t,
) -> *mut libc::c_char {
if response.is_null() {
eprintln!("ignoring careless call to dc_http_response_get_blob()");
return ptr::null_mut();
}
let response = &*response;
let blob_len = response.blob.len();
let ptr = libc::malloc(blob_len);
libc::memcpy(ptr, response.blob.as_ptr() as *mut libc::c_void, blob_len);
ptr as *mut libc::c_char
}
#[no_mangle]
pub unsafe extern "C" fn dc_http_response_get_size(
response: *const dc_http_response_t,
) -> libc::size_t {
if response.is_null() {
eprintln!("ignoring careless call to dc_http_response_get_size()");
return 0;
}
let response = &*response;
response.blob.len()
}
#[no_mangle]
pub unsafe extern "C" fn dc_http_response_unref(response: *mut dc_http_response_t) {
if response.is_null() {
eprintln!("ignoring careless call to dc_http_response_unref()");
return;
}
drop(Box::from_raw(response));
}
// -- Accounts
/// Reader-writer lock wrapper for accounts manager to guarantee thread safety when using

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-jsonrpc"
version = "1.112.9"
version = "1.111.0"
description = "DeltaChat JSON-RPC API"
edition = "2021"
default-run = "deltachat-jsonrpc-server"

View File

@@ -47,9 +47,6 @@ pub enum JSONRPCEventType {
msg: String,
},
/// Emitted before going into IDLE on the Inbox folder.
ImapInboxIdle,
/// Emitted when an new file in the $BLOBDIR was created
NewBlobFile {
file: String,
@@ -296,7 +293,6 @@ impl From<EventType> for JSONRPCEventType {
EventType::SmtpMessageSent(msg) => SmtpMessageSent { msg },
EventType::ImapMessageDeleted(msg) => ImapMessageDeleted { msg },
EventType::ImapMessageMoved(msg) => ImapMessageMoved { msg },
EventType::ImapInboxIdle => ImapInboxIdle,
EventType::NewBlobFile(file) => NewBlobFile { file },
EventType::DeletedBlobFile(file) => DeletedBlobFile { file },
EventType::Warning(msg) => Warning { msg },

View File

@@ -43,7 +43,6 @@ use types::account::Account;
use types::chat::FullChat;
use types::chat_list::ChatListEntry;
use types::contact::ContactObject;
use types::http::HttpResponse;
use types::message::MessageData;
use types::message::MessageObject;
use types::provider_info::ProviderInfo;
@@ -1611,15 +1610,6 @@ impl CommandApi {
Ok(general_purpose::STANDARD_NO_PAD.encode(blob))
}
/// Makes an HTTP GET request and returns a response.
///
/// `url` is the HTTP or HTTPS URL.
async fn get_http_response(&self, account_id: u32, url: String) -> Result<HttpResponse> {
let ctx = self.get_context(account_id).await?;
let response = deltachat::net::read_url_blob(&ctx, &url).await?.into();
Ok(response)
}
/// Forward messages to another chat.
///
/// All types of messages can be forwarded,

View File

@@ -1,29 +0,0 @@
use deltachat::net::HttpResponse as CoreHttpResponse;
use serde::Serialize;
use typescript_type_def::TypeDef;
#[derive(Serialize, TypeDef)]
pub struct HttpResponse {
/// base64-encoded response body.
blob: String,
/// MIME type, e.g. "text/plain" or "text/html".
mimetype: Option<String>,
/// Encoding, e.g. "utf-8".
encoding: Option<String>,
}
impl From<CoreHttpResponse> for HttpResponse {
fn from(response: CoreHttpResponse) -> Self {
use base64::{engine::general_purpose, Engine as _};
let blob = general_purpose::STANDARD_NO_PAD.encode(response.blob);
let mimetype = response.mimetype;
let encoding = response.encoding;
HttpResponse {
blob,
mimetype,
encoding,
}
}
}

View File

@@ -2,7 +2,6 @@ pub mod account;
pub mod chat;
pub mod chat_list;
pub mod contact;
pub mod http;
pub mod location;
pub mod message;
pub mod provider_info;

View File

@@ -55,5 +55,5 @@
},
"type": "module",
"types": "dist/deltachat.d.ts",
"version": "1.112.9"
"version": "1.111.0"
}

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-repl"
version = "1.112.9"
version = "1.111.0"
license = "MPL-2.0"
edition = "2021"

View File

@@ -21,9 +21,6 @@ deltachat_rpc_client = [
[project.entry-points.pytest11]
"deltachat_rpc_client.pytestplugin" = "deltachat_rpc_client.pytestplugin"
[tool.setuptools_scm]
root = ".."
[tool.black]
line-length = 120

View File

@@ -126,7 +126,7 @@ class Chat:
"file": file,
"location": location,
"overrideSenderName": override_sender_name,
"quotedMessageId": quoted_msg,
"quotedMsg": quoted_msg,
}
msg_id = await self._rpc.send_msg(self.account.id, self.id, draft)
return Message(self.account, msg_id)

View File

@@ -31,7 +31,6 @@ class EventType(str, Enum):
SMTP_MESSAGE_SENT = "SmtpMessageSent"
IMAP_MESSAGE_DELETED = "ImapMessageDeleted"
IMAP_MESSAGE_MOVED = "ImapMessageMoved"
IMAP_INBOX_IDLE = "ImapInboxIdle"
NEW_BLOB_FILE = "NewBlobFile"
DELETED_BLOB_FILE = "DeletedBlobFile"
WARNING = "Warning"

View File

@@ -55,11 +55,6 @@ class ACFactory:
async def get_online_account(self) -> Account:
account = await self.new_configured_account()
await account.start_io()
while True:
event = await account.wait_for_event()
print(event)
if event.type == EventType.IMAP_INBOX_IDLE:
break
return account
async def get_online_accounts(self, num: int) -> List[Account]:

View File

@@ -6,7 +6,7 @@ envlist =
[testenv]
commands =
pytest {posargs}
pytest --exitfirst {posargs}
setenv =
# Avoid stack overflow when Rust core is built without optimizations.
RUST_MIN_STACK=8388608
@@ -15,7 +15,6 @@ passenv =
deps =
pytest
pytest-asyncio
pytest-timeout
aiohttp
aiodns
@@ -28,6 +27,3 @@ deps =
commands =
black --quiet --check --diff src/ examples/ tests/
ruff src/ examples/ tests/
[pytest]
timeout = 60

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-rpc-server"
version = "1.112.9"
version = "1.111.0"
description = "DeltaChat JSON-RPC server"
edition = "2021"
readme = "README.md"
@@ -9,9 +9,11 @@ license = "MPL-2.0"
keywords = ["deltachat", "chat", "openpgp", "email", "encryption"]
categories = ["cryptography", "std", "email"]
[[bin]]
name = "deltachat-rpc-server"
[dependencies]
deltachat-jsonrpc = { path = "../deltachat-jsonrpc", default-features = false }
deltachat = { path = "..", default-features = false }
anyhow = "1"
env_logger = { version = "0.10.0" }

View File

@@ -1,11 +1,9 @@
use std::env;
///! Delta Chat core RPC server.
///!
///! It speaks JSON Lines over stdio.
use std::path::PathBuf;
use anyhow::{anyhow, Context as _, Result};
use deltachat::constants::DC_VERSION_STR;
use anyhow::Result;
use deltachat_jsonrpc::api::events::event_to_json_rpc_notification;
use deltachat_jsonrpc::api::{Accounts, CommandApi};
use futures_lite::stream::StreamExt;
@@ -15,23 +13,6 @@ use yerpc::{RpcClient, RpcSession};
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
let mut args = env::args_os();
let _program_name = args.next().context("no command line arguments found")?;
if let Some(first_arg) = args.next() {
if first_arg.to_str() == Some("--version") {
if let Some(arg) = args.next() {
return Err(anyhow!("Unrecognized argument {:?}", arg));
}
eprintln!("{}", &*DC_VERSION_STR);
return Ok(());
} else {
return Err(anyhow!("Unrecognized option {:?}", first_arg));
}
}
if let Some(arg) = args.next() {
return Err(anyhow!("Unrecognized argument {:?}", arg));
}
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
let path = std::env::var("DC_ACCOUNTS_PATH").unwrap_or_else(|_| "accounts".to_string());

View File

@@ -11,13 +11,9 @@ ignore = [
# Accept some duplicate versions, ideally we work towards this list
# becoming empty. Adding versions forces us to revisit this at least
# when upgrading.
# Please keep this list alphabetically sorted.
skip = [
{ name = "base64", version = "<0.21" },
{ name = "block-buffer", version = "<0.10" },
{ name = "clap", version = "3.2.23" },
{ name = "clap_lex", version = "0.2.4" },
{ name = "convert_case", version = "0.4.0" },
{ name = "darling", version = "<0.14" },
{ name = "darling_core", version = "<0.14" },
{ name = "darling_macro", version = "<0.14" },
@@ -33,16 +29,24 @@ skip = [
{ name = "rand_chacha", version = "<0.3" },
{ name = "rand_core", version = "<0.6" },
{ name = "sha2", version = "<0.10" },
{ name = "spin", version = "<0.9.6" },
{ name = "time", version = "<0.3" },
{ name = "version_check", version = "<0.9" },
{ name = "wasi", version = "<0.11" },
{ name = "windows-sys", version = "<0.45" },
{ name = "windows_aarch64_msvc", version = "<0.42" },
{ name = "windows_i686_gnu", version = "<0.42" },
{ name = "windows_i686_msvc", version = "<0.42" },
{ name = "windows_x86_64_gnu", version = "<0.42" },
{ name = "windows_x86_64_msvc", version = "<0.42" },
{ name = "windows_x86_64_gnu", version = "<0.42" },
{ name = "windows_i686_msvc", version = "<0.42" },
{ name = "windows_i686_gnu", version = "<0.42" },
{ name = "windows_aarch64_msvc", version = "<0.42" },
{ name = "unicode-xid", version = "<0.2.4" },
{ name = "syn", version = "<1.0" },
{ name = "quote", version = "<1.0" },
{ name = "proc-macro2", version = "<1.0" },
{ name = "portable-atomic", version = "<1.0" },
{ name = "spin", version = "<0.9.6" },
{ name = "convert_case", version = "0.4.0" },
{ name = "clap_lex", version = "0.2.4" },
{ name = "clap", version = "3.2.23" },
]
@@ -74,5 +78,7 @@ license-files = [
github = [
"async-email",
"deltachat",
"n0-computer",
"quinn-rs",
"dignifiedquire",
]

1287
fuzz/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -37,7 +37,6 @@ module.exports = {
DC_EVENT_ERROR: 400,
DC_EVENT_ERROR_SELF_NOT_IN_GROUP: 410,
DC_EVENT_IMAP_CONNECTED: 102,
DC_EVENT_IMAP_INBOX_IDLE: 106,
DC_EVENT_IMAP_MESSAGE_DELETED: 104,
DC_EVENT_IMAP_MESSAGE_MOVED: 105,
DC_EVENT_IMEX_FILE_WRITTEN: 2052,
@@ -113,7 +112,6 @@ module.exports = {
DC_QR_ADDR: 320,
DC_QR_ASK_VERIFYCONTACT: 200,
DC_QR_ASK_VERIFYGROUP: 202,
DC_QR_BACKUP: 251,
DC_QR_ERROR: 400,
DC_QR_FPR_MISMATCH: 220,
DC_QR_FPR_OK: 210,
@@ -151,7 +149,6 @@ module.exports = {
DC_STR_AEAP_EXPLANATION_AND_LINK: 123,
DC_STR_ARCHIVEDCHATS: 40,
DC_STR_AUDIO: 11,
DC_STR_BACKUP_TRANSFER_QR: 162,
DC_STR_BAD_TIME_MSG_BODY: 85,
DC_STR_BROADCAST_LIST: 115,
DC_STR_CANNOT_LOGIN: 60,

View File

@@ -8,7 +8,6 @@ module.exports = {
103: 'DC_EVENT_SMTP_MESSAGE_SENT',
104: 'DC_EVENT_IMAP_MESSAGE_DELETED',
105: 'DC_EVENT_IMAP_MESSAGE_MOVED',
106: 'DC_EVENT_IMAP_INBOX_IDLE',
150: 'DC_EVENT_NEW_BLOB_FILE',
151: 'DC_EVENT_DELETED_BLOB_FILE',
300: 'DC_EVENT_WARNING',

View File

@@ -37,7 +37,6 @@ export enum C {
DC_EVENT_ERROR = 400,
DC_EVENT_ERROR_SELF_NOT_IN_GROUP = 410,
DC_EVENT_IMAP_CONNECTED = 102,
DC_EVENT_IMAP_INBOX_IDLE = 106,
DC_EVENT_IMAP_MESSAGE_DELETED = 104,
DC_EVENT_IMAP_MESSAGE_MOVED = 105,
DC_EVENT_IMEX_FILE_WRITTEN = 2052,
@@ -113,7 +112,6 @@ export enum C {
DC_QR_ADDR = 320,
DC_QR_ASK_VERIFYCONTACT = 200,
DC_QR_ASK_VERIFYGROUP = 202,
DC_QR_BACKUP = 251,
DC_QR_ERROR = 400,
DC_QR_FPR_MISMATCH = 220,
DC_QR_FPR_OK = 210,
@@ -151,7 +149,6 @@ export enum C {
DC_STR_AEAP_EXPLANATION_AND_LINK = 123,
DC_STR_ARCHIVEDCHATS = 40,
DC_STR_AUDIO = 11,
DC_STR_BACKUP_TRANSFER_QR = 162,
DC_STR_BAD_TIME_MSG_BODY = 85,
DC_STR_BROADCAST_LIST = 115,
DC_STR_CANNOT_LOGIN = 60,
@@ -292,7 +289,6 @@ export const EventId2EventName: { [key: number]: string } = {
103: 'DC_EVENT_SMTP_MESSAGE_SENT',
104: 'DC_EVENT_IMAP_MESSAGE_DELETED',
105: 'DC_EVENT_IMAP_MESSAGE_MOVED',
106: 'DC_EVENT_IMAP_INBOX_IDLE',
150: 'DC_EVENT_NEW_BLOB_FILE',
151: 'DC_EVENT_DELETED_BLOB_FILE',
300: 'DC_EVENT_WARNING',

View File

@@ -60,5 +60,5 @@
"test:mocha": "mocha -r esm node/test/test.js --growl --reporter=spec --bail --exit"
},
"types": "node/dist/index.d.ts",
"version": "1.112.9"
}
"version": "1.111.0"
}

View File

@@ -190,7 +190,7 @@ class FFIEventTracker:
- ac2 is still running FetchExsistingMsgs job and thinks it's an existing, old message
- therefore no DC_EVENT_INCOMING_MSG is sent
"""
self.get_matching("DC_EVENT_IMAP_INBOX_IDLE")
self.get_info_contains("INBOX: Idle entering")
def wait_next_incoming_message(self):
"""wait for and return next incoming message."""

View File

@@ -0,0 +1,19 @@
import os
import subprocess
import sys
if __name__ == "__main__":
assert len(sys.argv) == 2
workspacedir = sys.argv[1]
for relpath in os.listdir(workspacedir):
if relpath.startswith("deltachat"):
p = os.path.join(workspacedir, relpath)
subprocess.check_call(
[
"auditwheel",
"repair",
p,
"-w",
workspacedir,
],
)

View File

@@ -220,16 +220,16 @@ def test_fetch_existing(acfactory, lp, mvbox_move):
acfactory.bring_accounts_online()
assert_folders_configured(ac1)
lp.sec("send out message with bcc to ourselves")
ac1.set_config("bcc_self", "1")
chat = acfactory.get_accepted_chat(ac1, ac2)
chat.send_text("message text")
assert ac1.direct_imap.select_config_folder("mvbox" if mvbox_move else "inbox")
with ac1.direct_imap.idle() as idle1:
lp.sec("send out message with bcc to ourselves")
ac1.set_config("bcc_self", "1")
chat = acfactory.get_accepted_chat(ac1, ac2)
chat.send_text("message text")
assert_folders_configured(ac1)
lp.sec("wait until the bcc_self message arrives in correct folder and is marked seen")
if mvbox_move:
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder DeltaChat as seen.")
else:
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
lp.sec("wait until the bcc_self message arrives in correct folder and is marked seen")
assert idle1.wait_for_seen()
assert_folders_configured(ac1)
lp.sec("create a cloned ac1 and fetch contact history during configure")
@@ -271,12 +271,12 @@ def test_fetch_existing_msgs_group_and_single(acfactory, lp):
ac1._evtracker.wait_next_incoming_message()
lp.sec("send out message with bcc to ourselves")
ac1.set_config("bcc_self", "1")
ac1_ac2_chat = ac1.create_chat(ac2)
ac1_ac2_chat.send_text("outgoing, encrypted direct message, creating a chat")
# wait until the bcc_self message arrives
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
with ac1.direct_imap.idle() as idle1:
ac1.set_config("bcc_self", "1")
ac1_ac2_chat = ac1.create_chat(ac2)
ac1_ac2_chat.send_text("outgoing, encrypted direct message, creating a chat")
# wait until the bcc_self message arrives
assert idle1.wait_for_seen()
lp.sec("Clone online account and let it fetch the existing messages")
ac1_clone = acfactory.new_online_configuring_account(cloned_from=ac1)

View File

@@ -134,27 +134,22 @@ def test_one_account_send_bcc_setting(acfactory, lp):
ac1.set_config("bcc_self", "1")
lp.sec("send out message with bcc to ourselves")
msg_out = chat.send_text("message2")
with ac1.direct_imap.idle() as idle1:
msg_out = chat.send_text("message2")
# wait for send out (BCC)
ev = ac1._evtracker.get_matching("DC_EVENT_SMTP_MESSAGE_SENT")
assert ac1.get_config("bcc_self") == "1"
# wait for send out (BCC)
ev = ac1._evtracker.get_matching("DC_EVENT_SMTP_MESSAGE_SENT")
assert ac1.get_config("bcc_self") == "1"
# Second client receives only second message, but not the first.
# now make sure we are sending message to ourselves too
assert self_addr in ev.data2
assert other_addr in ev.data2
assert idle1.wait_for_seen()
# Second client receives only second message, but not the first
ev_msg = ac1_clone._evtracker.wait_next_messages_changed()
assert ev_msg.text == msg_out.text
# now make sure we are sending message to ourselves too
assert self_addr in ev.data2
assert other_addr in ev.data2
# BCC-self messages are marked as seen by the sender device.
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
# Check that the message is marked as seen on IMAP.
ac1.direct_imap.select_folder("Inbox")
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_send_file_twice_unicode_filename_mangling(tmpdir, acfactory, lp):
ac1, ac2 = acfactory.get_online_accounts(2)
@@ -319,9 +314,6 @@ def test_webxdc_message(acfactory, data, lp):
assert msg2.text == "message1"
assert msg2.is_webxdc()
assert msg2.filename
ac2._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
ac2.direct_imap.select_folder("Inbox")
assert len(list(ac2.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_mvbox_sentbox_threads(acfactory, lp):
@@ -521,22 +513,22 @@ def test_send_and_receive_message_markseen(acfactory, lp):
msg4 = ac2._evtracker.wait_next_incoming_message()
lp.sec("mark messages as seen on ac2, wait for changes on ac1")
ac2.mark_seen_messages([msg2, msg4])
ev = ac2._evtracker.get_matching("DC_EVENT_MSGS_NOTICED")
assert msg2.chat.id == msg4.chat.id
assert ev.data1 == msg2.chat.id
assert ev.data2 == 0
ac2._evtracker.get_info_contains("Marked messages .* in folder INBOX as seen.")
with ac1.direct_imap.idle() as idle1:
with ac2.direct_imap.idle() as idle2:
ac2.mark_seen_messages([msg2, msg4])
ev = ac2._evtracker.get_matching("DC_EVENT_MSGS_NOTICED")
assert msg2.chat.id == msg4.chat.id
assert ev.data1 == msg2.chat.id
assert ev.data2 == 0
idle2.wait_for_seen()
lp.step("1")
for _i in range(2):
ev = ac1._evtracker.get_matching("DC_EVENT_MSG_READ")
assert ev.data1 > const.DC_CHAT_ID_LAST_SPECIAL
assert ev.data2 > const.DC_MSG_ID_LAST_SPECIAL
lp.step("2")
# Check that ac1 marks the read receipt as read.
ac1._evtracker.get_info_contains("Marked messages .* in folder INBOX as seen.")
lp.step("1")
for _i in range(2):
ev = ac1._evtracker.get_matching("DC_EVENT_MSG_READ")
assert ev.data1 > const.DC_CHAT_ID_LAST_SPECIAL
assert ev.data2 > const.DC_MSG_ID_LAST_SPECIAL
lp.step("2")
idle1.wait_for_seen() # Check that ac1 marks the read receipt as read
assert msg1.is_out_mdn_received()
assert msg3.is_out_mdn_received()
@@ -621,24 +613,18 @@ def test_markseen_message_and_mdn(acfactory, mvbox_move):
# Do not send BCC to self, we only want to test MDN on ac1.
ac1.set_config("bcc_self", "0")
acfactory.get_accepted_chat(ac1, ac2).send_text("hi")
msg = ac2._evtracker.wait_next_incoming_message()
ac2.mark_seen_messages([msg])
folder = "mvbox" if mvbox_move else "inbox"
for ac in [ac1, ac2]:
if mvbox_move:
ac._evtracker.get_info_contains("Marked messages [0-9]+ in folder DeltaChat as seen.")
else:
ac._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
ac1.direct_imap.select_config_folder(folder)
ac2.direct_imap.select_config_folder(folder)
with ac1.direct_imap.idle() as idle1:
with ac2.direct_imap.idle() as idle2:
acfactory.get_accepted_chat(ac1, ac2).send_text("hi")
msg = ac2._evtracker.wait_next_incoming_message()
# Check that the mdn is marked as seen
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
# Check original message is marked as seen
assert len(list(ac2.direct_imap.conn.fetch(AND(seen=True)))) == 1
ac2.mark_seen_messages([msg])
idle2.wait_for_seen() # Check original message is marked as seen
idle1.wait_for_seen() # Check that the mdn is marked as seen
def test_reply_privately(acfactory):
@@ -692,24 +678,23 @@ def test_mdn_asymmetric(acfactory, lp):
assert len(msg.chat.get_messages()) == 1
lp.sec("ac2: mark incoming message as seen")
ac2.mark_seen_messages([msg])
ac1.direct_imap.select_config_folder("mvbox")
with ac1.direct_imap.idle() as idle1:
lp.sec("ac2: mark incoming message as seen")
ac2.mark_seen_messages([msg])
lp.sec("ac1: waiting for incoming activity")
# MDN should be moved even though MDNs are already disabled
ac1._evtracker.get_matching("DC_EVENT_IMAP_MESSAGE_MOVED")
lp.sec("ac1: waiting for incoming activity")
# MDN should be moved even though MDNs are already disabled
ac1._evtracker.get_matching("DC_EVENT_IMAP_MESSAGE_MOVED")
assert len(chat.get_messages()) == 1
assert len(chat.get_messages()) == 1
# Wait for the message to be marked as seen on IMAP.
ac1._evtracker.get_info_contains("Marked messages 1 in folder DeltaChat as seen.")
# Wait for the message to be marked as seen on IMAP.
assert idle1.wait_for_seen()
# MDN is received even though MDNs are already disabled
assert msg_out.is_out_mdn_received()
ac1.direct_imap.select_config_folder("mvbox")
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_send_and_receive_will_encrypt_decrypt(acfactory, lp):
ac1, ac2 = acfactory.get_online_accounts(2)

View File

@@ -8,7 +8,7 @@ envlist =
[testenv]
commands =
pytest -n6 --extra-info -v -rsXx --ignored --strict-tls {posargs: tests examples}
pytest -n6 --exitfirst --extra-info -v -rsXx --ignored --strict-tls {posargs: tests examples}
pip wheel . -w {toxworkdir}/wheelhouse --no-deps
setenv =
# Avoid stack overflow when Rust core is built without optimizations.
@@ -33,6 +33,18 @@ passenv =
CARGO_TARGET_DIR
RUSTC_WRAPPER
[testenv:auditwheels]
skipsdist = True
deps = auditwheel
passenv =
DCC_RS_DEV
DCC_RS_TARGET
AUDITWHEEL_ARCH
AUDITWHEEL_PLAT
AUDITWHEEL_POLICY
commands =
python tests/auditwheels.py {toxworkdir}/wheelhouse
[testenv:lint]
skipsdist = True
skip_install = True

View File

@@ -1 +0,0 @@
2023-05-12

View File

@@ -8,8 +8,6 @@ and an own build machine.
- `clippy.sh` runs `cargo clippy` for all Rust code in the project.
- `deny.sh` runs `cargo deny` for all Rust code in the project.
- `../.github/workflows` contains jobs run by GitHub Actions.
- `remote_tests_python.sh` rsyncs to a build machine and runs

View File

@@ -1,2 +0,0 @@
#!/bin/sh
cargo deny --workspace --all-features check -D warnings

View File

@@ -31,9 +31,7 @@ unset DCC_NEW_TMP_EMAIL
# Try to build wheels for a range of interpreters, but don't fail if they are not available.
# E.g. musllinux_1_1 does not have PyPy interpreters as of 2022-07-10
tox --workdir "$TOXWORKDIR" -e py37,py38,py39,py310,py311,pypy37,pypy38,pypy39 --skip-missing-interpreters true
auditwheel repair "$TOXWORKDIR"/wheelhouse/deltachat* -w "$TOXWORKDIR/wheelhouse"
tox --workdir "$TOXWORKDIR" -e py37,py38,py39,py310,py311,pypy37,pypy38,pypy39,auditwheels --skip-missing-interpreters true
echo -----------------------

View File

@@ -1,6 +1,5 @@
#!/usr/bin/env python3
import datetime
import json
import os
import pathlib
@@ -57,8 +56,7 @@ def update_package_json(relpath, newversion):
json_data = json.loads(f.read())
json_data["version"] = newversion
with open(p, "w") as f:
json.dump(json_data, f, sort_keys=True, indent=2)
f.write("\n")
f.write(json.dumps(json_data, sort_keys=True, indent=2))
def main():
@@ -92,25 +90,15 @@ def main():
ffi_toml = read_toml_version("deltachat-ffi/Cargo.toml")
assert core_toml == ffi_toml, (core_toml, ffi_toml)
today = datetime.date.today().isoformat()
if "alpha" not in newversion:
changelog_name = "CHANGELOG.md"
changelog_tmpname = changelog_name + ".tmp"
changelog_tmp = open(changelog_tmpname, "w")
found = False
for line in open(changelog_name):
for line in open("CHANGELOG.md"):
## 1.25.0
if line == f"## [{newversion}]\n":
line = f"## [{newversion}] - {today}\n"
found = True
changelog_tmp.write(line)
if not found:
if line.startswith("## ") and line[2:].strip().startswith(newversion):
break
else:
raise SystemExit(
f"{changelog_name} contains no entry for version: {newversion}"
f"CHANGELOG.md contains no entry for version: {newversion}"
)
changelog_tmp.close()
os.rename(changelog_tmpname, changelog_name)
for toml_filename in toml_list:
replace_toml_version(toml_filename, newversion)
@@ -118,9 +106,6 @@ def main():
for json_filename in json_list:
update_package_json(json_filename, newversion)
with open("release-date.in", "w") as f:
f.write(today)
print("running cargo check")
subprocess.call(["cargo", "check"])

View File

@@ -262,7 +262,7 @@ impl Accounts {
pub async fn stop_io(&self) {
// Sending an event here wakes up event loop even
// if there are no accounts.
info!(self, "Stopping IO for all accounts.");
info!(self, "Stopping IO for all accounts");
for account in self.accounts.values() {
account.stop_io().await;
}

View File

@@ -417,7 +417,7 @@ impl<'a> BlobObject<'a> {
info!(
context,
"Final scaled-down image size: {}B ({}px).",
"Final scaled-down image size: {}B ({}px)",
encoded.len(),
img_wh
);
@@ -458,7 +458,7 @@ impl<'a> BlobObject<'a> {
Some(3) => return Ok(180),
Some(6) => return Ok(90),
Some(8) => return Ok(270),
other => warn!(context, "Exif orientation value ignored: {other:?}."),
other => warn!(context, "exif orientation value ignored: {:?}", other),
}
}
Ok(0)
@@ -490,7 +490,7 @@ impl<'a> BlobDirContents<'a> {
match entry {
Ok(entry) => Some(entry),
Err(err) => {
error!(context, "Failed to read blob file: {err}.");
error!(context, "Failed to read blob file: {err}");
None
}
}
@@ -501,7 +501,7 @@ impl<'a> BlobDirContents<'a> {
false => {
warn!(
context,
"Export: Found blob dir entry {} that is not a file, ignoring.",
"Export: Found blob dir entry {} that is not a file, ignoring",
entry.path().display()
);
None
@@ -569,7 +569,7 @@ fn encoded_img_exceeds_bytes(
if encoded.len() > max_bytes {
info!(
context,
"Image size {}B ({}x{}px) exceeds {}B, need to scale down.",
"image size {}B ({}x{}px) exceeds {}B, need to scale down",
encoded.len(),
img.width(),
img.height(),

View File

@@ -247,7 +247,7 @@ impl ChatId {
} else {
warn!(
context,
"Cannot create chat, contact {contact_id} does not exist."
"Cannot create chat, contact {} does not exist.", contact_id,
);
bail!("Can not create chat for non-existing contact");
}
@@ -285,7 +285,7 @@ impl ChatId {
let chat_id = ChatId::new(u32::try_from(row_id)?);
info!(
context,
"Created group/mailinglist '{}' grpid={} as {}, blocked={}.",
"Created group/mailinglist '{}' grpid={} as {}, blocked={}",
grpname,
grpid,
chat_id,
@@ -338,14 +338,14 @@ impl ChatId {
if contact_id != ContactId::SELF {
info!(
context,
"Blocking the contact {contact_id} to block 1:1 chat."
"Blocking the contact {} to block 1:1 chat", contact_id
);
Contact::block(context, contact_id).await?;
}
}
}
Chattype::Group => {
info!(context, "Can't block groups yet, deleting the chat.");
info!(context, "Can't block groups yet, deleting the chat");
self.delete(context).await?;
}
Chattype::Mailinglist => {
@@ -500,7 +500,7 @@ impl ChatId {
let chat = Chat::load_from_db(context, self).await?;
if let Err(e) = self.inner_set_protection(context, protect).await {
error!(context, "Cannot set protection: {e:#}."); // make error user-visible
error!(context, "Cannot set protection: {}", e); // make error user-visible
return Err(e);
}
@@ -1070,7 +1070,7 @@ impl ChatId {
);
info!(
context,
"Set gossiped_timestamp for chat {} to {}.", self, timestamp,
"set gossiped_timestamp for chat {} to {}.", self, timestamp,
);
context
@@ -1211,7 +1211,7 @@ impl Chat {
Err(err) => {
error!(
context,
"Failed to load contacts for {}: {:#}.", chat.id, err
"failed to load contacts for {}: {:#}", chat.id, err
);
}
}
@@ -2032,11 +2032,8 @@ async fn prepare_msg_blob(context: &Context, msg: &mut Message) -> Result<()> {
.with_context(|| format!("attachment missing for message of type #{}", msg.viewtype))?;
if msg.viewtype == Viewtype::Image {
if let Err(err) = blob.recode_to_image_size(context).await {
warn!(
context,
"Cannot recode image, using original data: {err:#}."
);
if let Err(e) = blob.recode_to_image_size(context).await {
warn!(context, "Cannot recode image, using original data: {:?}", e);
}
}
msg.param.set(Param::File, blob.as_name());
@@ -2259,7 +2256,7 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
let attach_selfavatar = match shall_attach_selfavatar(context, msg.chat_id).await {
Ok(attach_selfavatar) => attach_selfavatar,
Err(err) => {
warn!(context, "SMTP job cannot get selfavatar-state: {err:#}.");
warn!(context, "job: cannot get selfavatar-state: {:#}", err);
false
}
};
@@ -2286,7 +2283,7 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
// may happen eg. for groups with only SELF and bcc_self disabled
info!(
context,
"Message {msg_id} has no recipient, skipping smtp-send."
"message {} has no recipient, skipping smtp-send", msg_id
);
msg_id.set_delivered(context).await?;
return Ok(None);
@@ -2321,27 +2318,27 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
if 0 != rendered_msg.last_added_location_id {
if let Err(err) = location::set_kml_sent_timestamp(context, msg.chat_id, time()).await {
error!(context, "Failed to set kml sent_timestamp: {err:#}.");
error!(context, "Failed to set kml sent_timestamp: {:#}", err);
}
if !msg.hidden {
if let Err(err) =
location::set_msg_location_id(context, msg.id, rendered_msg.last_added_location_id)
.await
{
error!(context, "Failed to set msg_location_id: {err:#}.");
error!(context, "Failed to set msg_location_id: {:#}", err);
}
}
}
if let Some(sync_ids) = rendered_msg.sync_ids_to_delete {
if let Err(err) = context.delete_sync_ids(sync_ids).await {
error!(context, "Failed to delete sync ids: {err:#}.");
error!(context, "Failed to delete sync ids: {:#}", err);
}
}
if attach_selfavatar {
if let Err(err) = msg.chat_id.set_selfavatar_timestamp(context, time()).await {
error!(context, "Failed to set selfavatar timestamp: {err:#}.");
error!(context, "Failed to set selfavatar timestamp: {:#}", err);
}
}
@@ -2696,7 +2693,8 @@ pub(crate) async fn mark_old_messages_as_noticed(
if !changed_chats.is_empty() {
info!(
context,
"Marking chats as noticed because there are newer outgoing messages: {changed_chats:?}."
"Marking chats as noticed because there are newer outgoing messages: {:?}",
changed_chats
);
}
@@ -2727,14 +2725,12 @@ pub async fn get_chat_media(
"SELECT id
FROM msgs
WHERE (1=? OR chat_id=?)
AND chat_id != ?
AND (type=? OR type=? OR type=?)
AND hidden=0
ORDER BY timestamp, id;",
paramsv![
chat_id.is_none(),
chat_id.unwrap_or_else(|| ChatId::new(0)),
DC_CHAT_ID_TRASH,
msg_type,
if msg_type2 != Viewtype::Unknown {
msg_type2
@@ -3019,7 +3015,7 @@ pub(crate) async fn add_contact_to_chat_ex(
// if SELF is not in the group, members cannot be added at all.
warn!(
context,
"Invalid attempt to add self e-mail address to group."
"invalid attempt to add self e-mail address to group"
);
return Ok(false);
}
@@ -3566,7 +3562,7 @@ pub async fn add_device_msg_with_importance(
if let Some(label) = label {
if was_device_msg_ever_added(context, label).await? {
info!(context, "Device-message {label} already added.");
info!(context, "device-message {} already added", label);
return Ok(msg_id);
}
}
@@ -3797,7 +3793,6 @@ mod tests {
use crate::chatlist::{get_archived_cnt, Chatlist};
use crate::constants::{DC_GCL_ARCHIVED_ONLY, DC_GCL_NO_SPECIALS};
use crate::contact::{Contact, ContactAddress};
use crate::message::delete_msgs;
use crate::receive_imf::receive_imf;
use crate::test_utils::TestContext;
@@ -5980,7 +5975,7 @@ mod tests {
include_bytes!("../test-data/image/avatar64x64.png"),
)
.await?;
let second_image_msg_id = send_media(
send_media(
&t,
chat_id2,
Viewtype::Image,
@@ -6082,21 +6077,6 @@ mod tests {
4
);
// Delete an image.
delete_msgs(&t, &[second_image_msg_id]).await?;
assert_eq!(
get_chat_media(
&t,
None,
Viewtype::Image,
Viewtype::Sticker,
Viewtype::Webxdc,
)
.await?
.len(),
3
);
Ok(())
}
}

View File

@@ -176,7 +176,7 @@ impl Chatlist {
// allow searching over special names that may change at any time
// when the ui calls set_stock_translation()
if let Err(err) = update_special_chat_names(context).await {
warn!(context, "Cannot update special chat names: {err:#}.")
warn!(context, "cannot update special chat names: {:?}", err)
}
let str_like_cmd = format!("%{query}%");

View File

@@ -2,6 +2,7 @@
mod auto_mozilla;
mod auto_outlook;
mod read_url;
mod server_params;
use anyhow::{bail, ensure, Context as _, Result};

View File

@@ -6,10 +6,10 @@ use std::str::FromStr;
use quick_xml::events::{BytesStart, Event};
use super::read_url::read_url;
use super::{Error, ServerParams};
use crate::context::Context;
use crate::login_param::LoginParam;
use crate::net::read_url;
use crate::provider::{Protocol, Socket};
#[derive(Debug)]

View File

@@ -7,9 +7,9 @@ use std::io::BufRead;
use quick_xml::events::Event;
use super::read_url::read_url;
use super::{Error, ServerParams};
use crate::context::Context;
use crate::net::read_url;
use crate::provider::{Protocol, Socket};
/// Result of parsing a single `Protocol` tag.

44
src/configure/read_url.rs Normal file
View File

@@ -0,0 +1,44 @@
use anyhow::{anyhow, format_err};
use crate::context::Context;
use crate::socks::Socks5Config;
pub async fn read_url(context: &Context, url: &str) -> anyhow::Result<String> {
match read_url_inner(context, url).await {
Ok(s) => {
info!(context, "Successfully read url {}", url);
Ok(s)
}
Err(e) => {
info!(context, "Can't read URL {}: {:#}", url, e);
Err(format_err!("Can't read URL {}: {:#}", url, e))
}
}
}
pub async fn read_url_inner(context: &Context, url: &str) -> anyhow::Result<String> {
let socks5_config = Socks5Config::from_database(&context.sql).await?;
let client = crate::http::get_client(socks5_config)?;
let mut url = url.to_string();
// Follow up to 10 http-redirects
for _i in 0..10 {
let response = client.get(&url).send().await?;
if response.status().is_redirection() {
let headers = response.headers();
let header = headers
.get_all("location")
.iter()
.last()
.ok_or_else(|| anyhow!("Redirection doesn't have a target location"))?
.to_str()?;
info!(context, "Following redirect to {}", header);
url = header.to_string();
continue;
}
return response.text().await.map_err(Into::into);
}
Err(format_err!("Followed 10 redirections"))
}

View File

@@ -260,7 +260,7 @@ enum RunningState {
Running { cancel_sender: Sender<()> },
/// Cancel signal has been sent, waiting for ongoing process to be freed.
ShallStop { request: Instant },
ShallStop,
/// There is no ongoing process, a new one can be allocated.
Stopped,
@@ -530,9 +530,6 @@ impl Context {
pub(crate) async fn free_ongoing(&self) {
let mut s = self.running_state.write().await;
if let RunningState::ShallStop { request } = *s {
info!(self, "Ongoing stopped in {:?}", request.elapsed());
}
*s = RunningState::Stopped;
}
@@ -545,11 +542,9 @@ impl Context {
warn!(self, "could not cancel ongoing: {:#}", err);
}
info!(self, "Signaling the ongoing process to stop ASAP.",);
*s = RunningState::ShallStop {
request: Instant::now(),
};
*s = RunningState::ShallStop;
}
RunningState::ShallStop { .. } | RunningState::Stopped => {
RunningState::ShallStop | RunningState::Stopped => {
info!(self, "No ongoing process to stop.",);
}
}
@@ -559,7 +554,7 @@ impl Context {
pub(crate) async fn shall_stop_ongoing(&self) -> bool {
match &*self.running_state.read().await {
RunningState::Running { .. } => false,
RunningState::ShallStop { .. } | RunningState::Stopped => true,
RunningState::ShallStop | RunningState::Stopped => true,
}
}

View File

@@ -133,9 +133,6 @@ pub enum EventType {
/// Emitted when an IMAP message has been moved
ImapMessageMoved(String),
/// Emitted before going into IDLE on the Inbox folder.
ImapInboxIdle,
/// Emitted when an new file in the $BLOBDIR was created
NewBlobFile(String),

24
src/http.rs Normal file
View File

@@ -0,0 +1,24 @@
//! # HTTP module.
use std::time::Duration;
use anyhow::Result;
use crate::socks::Socks5Config;
const HTTP_TIMEOUT: Duration = Duration::from_secs(30);
pub(crate) fn get_client(socks5_config: Option<Socks5Config>) -> Result<reqwest::Client> {
let builder = reqwest::ClientBuilder::new().timeout(HTTP_TIMEOUT);
let builder = if let Some(socks5_config) = socks5_config {
let proxy = reqwest::Proxy::all(socks5_config.to_url())?;
builder.proxy(proxy)
} else {
// Disable usage of "system" proxy configured via environment variables.
// It is enabled by default in `reqwest`, see
// <https://docs.rs/reqwest/0.11.14/reqwest/struct.ClientBuilder.html#method.no_proxy>
// for documentation.
builder.no_proxy()
};
Ok(builder.build()?)
}

View File

@@ -1324,8 +1324,8 @@ impl Imap {
// Fetch last DC_FETCH_EXISTING_MSGS_COUNT (100) messages.
// Sequence numbers are sequential. If there are 1000 messages in the inbox,
// we can fetch the sequence numbers 900-1000 and get the last 100 messages.
let first = cmp::max(1, exists - DC_FETCH_EXISTING_MSGS_COUNT + 1);
let set = format!("{first}:{exists}");
let first = cmp::max(1, exists - DC_FETCH_EXISTING_MSGS_COUNT);
let set = format!("{first}:*");
let mut list = session
.fetch(&set, PREFETCH_FLAGS)
.await

View File

@@ -91,13 +91,15 @@ pub async fn imex(
let cancel = context.alloc_ongoing().await?;
let res = {
let _guard = context.scheduler.pause(context.clone()).await;
imex_inner(context, what, path, passphrase)
let mut guard = context.scheduler.pause(context.clone()).await;
let res = imex_inner(context, what, path, passphrase)
.race(async {
cancel.recv().await.ok();
Err(format_err!("canceled"))
})
.await
.await;
guard.resume().await;
res
};
context.free_ongoing().await;
@@ -397,7 +399,8 @@ async fn imex_inner(
export_backup(context, path, passphrase.unwrap_or_default()).await
}
ImexMode::ImportBackup => {
import_backup(context, path, passphrase.unwrap_or_default()).await
import_backup(context, path, passphrase.unwrap_or_default()).await?;
context.sql.run_migrations(context).await
}
}
}
@@ -473,7 +476,6 @@ async fn import_backup(
}
}
context.sql.run_migrations(context).await?;
delete_and_reset_all_device_msgs(context).await?;
Ok(())

View File

@@ -32,8 +32,7 @@ use std::task::Poll;
use anyhow::{anyhow, bail, ensure, format_err, Context as _, Result};
use async_channel::Receiver;
use futures_lite::StreamExt;
use iroh::blobs::Collection;
use iroh::get::DataStream;
use iroh::get::{DataStream, Options};
use iroh::progress::ProgressEmitter;
use iroh::protocol::AuthToken;
use iroh::provider::{DataSource, Event, Provider, Ticket};
@@ -44,20 +43,15 @@ use tokio::sync::broadcast::error::RecvError;
use tokio::sync::{broadcast, Mutex};
use tokio::task::{JoinHandle, JoinSet};
use tokio_stream::wrappers::ReadDirStream;
use tokio_util::sync::CancellationToken;
use crate::blob::BlobDirContents;
use crate::chat::{add_device_msg, delete_and_reset_all_device_msgs};
use crate::chat::delete_and_reset_all_device_msgs;
use crate::context::Context;
use crate::message::{Message, Viewtype};
use crate::qr::{self, Qr};
use crate::stock_str::backup_transfer_msg_body;
use crate::qr::Qr;
use crate::{e2ee, EventType};
use super::{export_database, DBFILE_BACKUP_NAME};
const MAX_CONCURRENT_DIALS: u8 = 16;
/// Provide or send a backup of this device.
///
/// This creates a backup of the current device and starts a service which offers another
@@ -77,8 +71,6 @@ pub struct BackupProvider {
handle: JoinHandle<Result<()>>,
/// The ticket to retrieve the backup collection.
ticket: Ticket,
/// Guard to cancel the provider on drop.
_drop_guard: tokio_util::sync::DropGuard,
}
impl BackupProvider {
@@ -99,7 +91,7 @@ impl BackupProvider {
// Acquire global "ongoing" mutex.
let cancel_token = context.alloc_ongoing().await?;
let paused_guard = context.scheduler.pause(context.clone()).await;
let mut paused_guard = context.scheduler.pause(context.clone()).await;
let context_dir = context
.get_blobdir()
.parent()
@@ -127,28 +119,21 @@ impl BackupProvider {
Ok((provider, ticket)) => (provider, ticket),
Err(err) => {
context.free_ongoing().await;
paused_guard.resume().await;
return Err(err);
}
};
let drop_token = CancellationToken::new();
let handle = {
let context = context.clone();
let drop_token = drop_token.clone();
tokio::spawn(async move {
let res = Self::watch_provider(&context, provider, cancel_token, drop_token).await;
let res = Self::watch_provider(&context, provider, cancel_token).await;
context.free_ongoing().await;
// Explicit drop to move the guards into this future
drop(paused_guard);
paused_guard.resume().await;
drop(dbfile);
res
})
};
Ok(Self {
handle,
ticket,
_drop_guard: drop_token.drop_guard(),
})
Ok(Self { handle, ticket })
}
/// Creates the provider task.
@@ -184,7 +169,7 @@ impl BackupProvider {
.spawn()?;
context.emit_event(SendProgress::ProviderListening.into());
info!(context, "Waiting for remote to connect");
let ticket = provider.ticket(hash)?;
let ticket = provider.ticket(hash);
Ok((provider, ticket))
}
@@ -202,8 +187,8 @@ impl BackupProvider {
context: &Context,
mut provider: Provider,
cancel_token: Receiver<()>,
drop_token: CancellationToken,
) -> Result<()> {
// _dbfile exists so we can clean up the file once it is no longer needed
let mut events = provider.subscribe();
let mut total_size = 0;
let mut current_size = 0;
@@ -263,21 +248,12 @@ impl BackupProvider {
},
_ = cancel_token.recv() => {
provider.shutdown();
break Err(anyhow!("BackupProvider cancelled"));
break Err(anyhow!("BackupSender cancelled"));
},
_ = drop_token.cancelled() => {
provider.shutdown();
break Err(anyhow!("BackupProvider dropped"));
}
}
};
match &res {
Ok(_) => {
context.emit_event(SendProgress::Completed.into());
let mut msg = Message::new(Viewtype::Text);
msg.text = Some(backup_transfer_msg_body(context).await);
add_device_msg(context, None, Some(&mut msg)).await?;
}
Ok(_) => context.emit_event(SendProgress::Completed.into()),
Err(err) => {
error!(context, "Backup transfer failure: {err:#}");
context.emit_event(SendProgress::Failed.into())
@@ -393,20 +369,19 @@ pub async fn get_backup(context: &Context, qr: Qr) -> Result<()> {
!context.is_configured().await?,
"Cannot import backups to accounts in use."
);
let mut guard = context.scheduler.pause(context.clone()).await;
// Acquire global "ongoing" mutex.
let cancel_token = context.alloc_ongoing().await?;
let _guard = context.scheduler.pause(context.clone()).await;
info!(
context,
"Running get_backup for {}",
qr::format_backup(&qr)?
);
let res = tokio::select! {
biased;
res = get_backup_inner(context, qr) => res,
res = get_backup_inner(context, qr) => {
context.free_ongoing().await;
res
}
_ = cancel_token.recv() => Err(format_err!("cancelled")),
};
context.free_ongoing().await;
guard.resume().await;
res
}
@@ -415,68 +390,104 @@ async fn get_backup_inner(context: &Context, qr: Qr) -> Result<()> {
Qr::Backup { ticket } => ticket,
_ => bail!("QR code for backup must be of type DCBACKUP"),
};
match transfer_from_provider(context, &ticket).await {
Ok(()) => {
context.sql.run_migrations(context).await?;
delete_and_reset_all_device_msgs(context).await?;
context.emit_event(ReceiveProgress::Completed.into());
Ok(())
}
Err(err) => {
// Clean up any blobs we already wrote.
let readdir = fs::read_dir(context.get_blobdir()).await?;
let mut readdir = ReadDirStream::new(readdir);
while let Some(dirent) = readdir.next().await {
if let Ok(dirent) = dirent {
fs::remove_file(dirent.path()).await.ok();
}
if ticket.addrs.is_empty() {
bail!("ticket is missing addresses to dial");
}
for addr in &ticket.addrs {
let opts = Options {
addr: *addr,
peer_id: Some(ticket.peer),
keylog: false,
};
info!(context, "attempting to contact {}", addr);
match transfer_from_provider(context, &ticket, opts).await {
Ok(_) => {
delete_and_reset_all_device_msgs(context).await?;
context.emit_event(ReceiveProgress::Completed.into());
return Ok(());
}
Err(TransferError::ConnectionError(err)) => {
warn!(context, "Connection error: {err:#}.");
continue;
}
Err(TransferError::Other(err)) => {
// Clean up any blobs we already wrote.
let readdir = fs::read_dir(context.get_blobdir()).await?;
let mut readdir = ReadDirStream::new(readdir);
while let Some(dirent) = readdir.next().await {
if let Ok(dirent) = dirent {
fs::remove_file(dirent.path()).await.ok();
}
}
context.emit_event(ReceiveProgress::Failed.into());
return Err(err);
}
context.emit_event(ReceiveProgress::Failed.into());
Err(err)
}
}
Err(anyhow!("failed to contact provider"))
}
async fn transfer_from_provider(context: &Context, ticket: &Ticket) -> Result<()> {
/// Error during a single transfer attempt.
///
/// Mostly exists to distinguish between `ConnectionError` and any other errors.
#[derive(Debug, thiserror::Error)]
enum TransferError {
#[error("connection error")]
ConnectionError(#[source] anyhow::Error),
#[error("other")]
Other(#[source] anyhow::Error),
}
async fn transfer_from_provider(
context: &Context,
ticket: &Ticket,
opts: Options,
) -> Result<(), TransferError> {
let progress = ProgressEmitter::new(0, ReceiveProgress::max_blob_progress());
spawn_progress_proxy(context.clone(), progress.subscribe());
let mut connected = false;
let on_connected = || {
context.emit_event(ReceiveProgress::Connected.into());
async { Ok(()) }
};
let on_collection = |collection: &Collection| {
context.emit_event(ReceiveProgress::CollectionReceived.into());
progress.set_total(collection.total_blobs_size());
connected = true;
async { Ok(()) }
};
let jobs = Mutex::new(JoinSet::default());
let on_blob =
|hash, reader, name| on_blob(context, &progress, &jobs, ticket, hash, reader, name);
// Perform the transfer.
let keylog = false; // Do not enable rustls SSLKEYLOGFILE env var functionality
let stats = iroh::get::run_ticket(
ticket,
keylog,
MAX_CONCURRENT_DIALS,
let res = iroh::get::run(
ticket.hash,
ticket.token,
opts,
on_connected,
on_collection,
|collection| {
context.emit_event(ReceiveProgress::CollectionReceived.into());
progress.set_total(collection.total_blobs_size());
async { Ok(()) }
},
on_blob,
)
.await?;
.await;
let mut jobs = jobs.lock().await;
while let Some(job) = jobs.join_next().await {
job.context("job failed")?;
job.context("job failed").map_err(TransferError::Other)?;
}
drop(progress);
info!(
context,
"Backup transfer finished, transfer rate was {} Mbps.",
stats.mbits()
);
Ok(())
match res {
Ok(stats) => {
info!(
context,
"Backup transfer finished, transfer rate is {} Mbps.",
stats.mbits()
);
Ok(())
}
Err(err) => match connected {
true => Err(TransferError::Other(err)),
false => Err(TransferError::ConnectionError(err)),
},
}
}
/// Get callback when a blob is received from the provider.
@@ -518,7 +529,7 @@ async fn on_blob(
if name.starts_with("db/") {
let context = context.clone();
let token = ticket.token().to_string();
let token = ticket.token.to_string();
jobs.lock().await.spawn(async move {
if let Err(err) = context.sql.import(&path, token).await {
error!(context, "cannot import database: {:#?}", err);
@@ -680,16 +691,4 @@ mod tests {
assert_eq!(out, EventType::ImexProgress(progress));
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_drop_provider() {
let mut tcm = TestContextManager::new();
let ctx = tcm.alice().await;
let provider = BackupProvider::prepare(&ctx).await.unwrap();
drop(provider);
ctx.evtracker
.get_matching(|ev| matches!(ev, EventType::ImexProgress(0)))
.await;
}
}

View File

@@ -153,7 +153,7 @@ impl<'a> Connection<'a> {
}
pub(crate) async fn perform_job(context: &Context, mut connection: Connection<'_>, mut job: Job) {
info!(context, "Job {} started...", &job);
info!(context, "job {} started...", &job);
let try_res = match perform_job_action(context, &mut job, &mut connection, 0).await {
Status::RetryNow => perform_job_action(context, &mut job, &mut connection, 1).await,
@@ -165,7 +165,7 @@ pub(crate) async fn perform_job(context: &Context, mut connection: Connection<'_
let tries = job.tries + 1;
if tries < JOB_RETRIES {
info!(context, "Increase job {job} tries to {tries}.");
info!(context, "increase job {} tries to {}", job, tries);
job.tries = tries;
let time_offset = get_backoff_time_offset(tries);
job.desired_timestamp = time() + time_offset;
@@ -177,23 +177,26 @@ pub(crate) async fn perform_job(context: &Context, mut connection: Connection<'_
time_offset
);
job.save(context).await.unwrap_or_else(|err| {
error!(context, "Failed to save job: {err:#}.");
error!(context, "failed to save job: {:#}", err);
});
} else {
info!(
context,
"Remove job {job} as it exhausted {JOB_RETRIES} retries."
"remove job {} as it exhausted {} retries", job, JOB_RETRIES
);
job.delete(context).await.unwrap_or_else(|err| {
error!(context, "Failed to delete job: {err:#}.");
error!(context, "failed to delete job: {:#}", err);
});
}
}
Status::Finished(res) => {
if let Err(err) = res {
warn!(context, "Remove job {job} as it failed with error {err:#}.");
warn!(
context,
"remove job {} as it failed with error {:#}", job, err
);
} else {
info!(context, "Remove job {job} as it succeeded.");
info!(context, "remove job {} as it succeeded", job);
}
job.delete(context).await.unwrap_or_else(|err| {
@@ -209,13 +212,13 @@ async fn perform_job_action(
connection: &mut Connection<'_>,
tries: u32,
) -> Status {
info!(context, "Begin immediate try {tries} of job {job}.");
info!(context, "begin immediate try {} of job {}", tries, job);
let try_res = match job.action {
Action::DownloadMsg => job.download_msg(context, connection.inbox()).await,
};
info!(context, "Finished immediate try {tries} of job {job}.");
info!(context, "Finished immediate try {} of job {}", tries, job);
try_res
}
@@ -247,7 +250,7 @@ pub(crate) async fn schedule_resync(context: &Context) -> Result<()> {
pub async fn add(context: &Context, job: Job) -> Result<()> {
job.save(context).await.context("failed to save job")?;
info!(context, "Interrupt: IMAP.");
info!(context, "interrupt: imap");
context
.scheduler
.interrupt_inbox(InterruptInfo::new(false))
@@ -261,7 +264,7 @@ pub async fn add(context: &Context, job: Job) -> Result<()> {
/// jobs, this is tricky and probably wrong currently. Look at the
/// SQL queries for details.
pub(crate) async fn load_next(context: &Context, info: &InterruptInfo) -> Result<Option<Job>> {
info!(context, "Loading job.");
info!(context, "loading job");
let query;
let params;
@@ -313,19 +316,19 @@ LIMIT 1;
Ok(job) => return Ok(job),
Err(err) => {
// Remove invalid job from the DB
info!(context, "Cleaning up job, because of {err:#}.");
info!(context, "cleaning up job, because of {:#}", err);
// TODO: improve by only doing a single query
let id = context
.sql
.query_row(query, params.clone(), |row| row.get::<_, i32>(0))
.await
.context("failed to retrieve invalid job ID from the database")?;
.context("Failed to retrieve invalid job ID from the database")?;
context
.sql
.execute("DELETE FROM jobs WHERE id=?;", paramsv![id])
.await
.with_context(|| format!("failed to delete invalid job {id}"))?;
.with_context(|| format!("Failed to delete invalid job {id}"))?;
}
}
}

View File

@@ -68,6 +68,7 @@ mod decrypt;
pub mod download;
mod e2ee;
pub mod ephemeral;
mod http;
mod imap;
pub mod imex;
mod scheduler;
@@ -103,7 +104,7 @@ mod dehtml;
mod authres;
mod color;
pub mod html;
pub mod net;
mod net;
pub mod plaintext;
pub mod summary;

View File

@@ -1976,10 +1976,10 @@ pub(crate) fn get_list_post(headers: &[MailHeader]) -> Option<String> {
.map(|s| s.addr)
}
fn get_all_addresses_from_header(
headers: &[MailHeader],
pred: fn(String) -> bool,
) -> Vec<SingleInfo> {
fn get_all_addresses_from_header<F>(headers: &[MailHeader], pred: F) -> Vec<SingleInfo>
where
F: Fn(String) -> bool,
{
let mut result: Vec<SingleInfo> = Default::default();
headers

View File

@@ -1,4 +1,4 @@
//! # Common network utilities.
///! # Common network utilities.
use std::net::{IpAddr, SocketAddr};
use std::pin::Pin;
use std::str::FromStr;
@@ -12,12 +12,9 @@ use tokio_io_timeout::TimeoutStream;
use crate::context::Context;
use crate::tools::time;
pub(crate) mod http;
pub(crate) mod session;
pub(crate) mod tls;
pub use http::{read_url, read_url_blob, Response as HttpResponse};
async fn connect_tcp_inner(addr: SocketAddr, timeout_val: Duration) -> Result<TcpStream> {
let tcp_stream = timeout(timeout_val, TcpStream::connect(addr))
.await

View File

@@ -1,94 +0,0 @@
//! # HTTP module.
use std::time::Duration;
use anyhow::{anyhow, Result};
use mime::Mime;
use crate::context::Context;
use crate::socks::Socks5Config;
const HTTP_TIMEOUT: Duration = Duration::from_secs(30);
/// HTTP(S) GET response.
#[derive(Debug)]
pub struct Response {
/// Response body.
pub blob: Vec<u8>,
/// MIME type exntracted from the `Content-Type` header, if any.
pub mimetype: Option<String>,
/// Encoding extracted from the `Content-Type` header, if any.
pub encoding: Option<String>,
}
/// Retrieves the text contents of URL using HTTP GET request.
pub async fn read_url(context: &Context, url: &str) -> Result<String> {
Ok(read_url_inner(context, url).await?.text().await?)
}
/// Retrieves the binary contents of URL using HTTP GET request.
pub async fn read_url_blob(context: &Context, url: &str) -> Result<Response> {
let response = read_url_inner(context, url).await?;
let content_type = response
.headers()
.get(reqwest::header::CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.and_then(|value| value.parse::<Mime>().ok());
let mimetype = content_type
.as_ref()
.map(|mime| mime.essence_str().to_string());
let encoding = content_type.as_ref().and_then(|mime| {
mime.get_param(mime::CHARSET)
.map(|charset| charset.as_str().to_string())
});
let blob: Vec<u8> = response.bytes().await?.into();
Ok(Response {
blob,
mimetype,
encoding,
})
}
async fn read_url_inner(context: &Context, url: &str) -> Result<reqwest::Response> {
let socks5_config = Socks5Config::from_database(&context.sql).await?;
let client = get_client(socks5_config)?;
let mut url = url.to_string();
// Follow up to 10 http-redirects
for _i in 0..10 {
let response = client.get(&url).send().await?;
if response.status().is_redirection() {
let headers = response.headers();
let header = headers
.get_all("location")
.iter()
.last()
.ok_or_else(|| anyhow!("Redirection doesn't have a target location"))?
.to_str()?;
info!(context, "Following redirect to {}", header);
url = header.to_string();
continue;
}
return Ok(response);
}
Err(anyhow!("Followed 10 redirections"))
}
pub(crate) fn get_client(socks5_config: Option<Socks5Config>) -> Result<reqwest::Client> {
let builder = reqwest::ClientBuilder::new().timeout(HTTP_TIMEOUT);
let builder = if let Some(socks5_config) = socks5_config {
let proxy = reqwest::Proxy::all(socks5_config.to_url())?;
builder.proxy(proxy)
} else {
// Disable usage of "system" proxy configured via environment variables.
// It is enabled by default in `reqwest`, see
// <https://docs.rs/reqwest/0.11.14/reqwest/struct.ClientBuilder.html#method.no_proxy>
// for documentation.
builder.no_proxy()
};
Ok(builder.build()?)
}

View File

@@ -160,7 +160,7 @@ pub(crate) async fn get_oauth2_access_token(
// ... and POST
let socks5_config = Socks5Config::from_database(&context.sql).await?;
let client = crate::net::http::get_client(socks5_config)?;
let client = crate::http::get_client(socks5_config)?;
let response: Response = match client.post(post_url).form(&post_param).send().await {
Ok(resp) => match resp.json().await {
@@ -291,7 +291,7 @@ impl Oauth2 {
// "picture": "https://lh4.googleusercontent.com/-Gj5jh_9R0BY/AAAAAAAAAAI/AAAAAAAAAAA/IAjtjfjtjNA/photo.jpg"
// }
let socks5_config = Socks5Config::from_database(&context.sql).await.ok()?;
let client = match crate::net::http::get_client(socks5_config) {
let client = match crate::http::get_client(socks5_config) {
Ok(cl) => cl,
Err(err) => {
warn!(context, "failed to get HTTP client: {}", err);

View File

@@ -534,7 +534,7 @@ struct CreateAccountErrorResponse {
async fn set_account_from_qr(context: &Context, qr: &str) -> Result<()> {
let url_str = &qr[DCACCOUNT_SCHEME.len()..];
let socks5_config = Socks5Config::from_database(&context.sql).await?;
let response = crate::net::http::get_client(socks5_config)?
let response = crate::http::get_client(socks5_config)?
.post(url_str)
.send()
.await?;

View File

@@ -96,7 +96,7 @@ pub(crate) async fn receive_imf_inner(
is_partial_download: Option<u32>,
fetching_existing_messages: bool,
) -> Result<Option<ReceivedMsg>> {
info!(context, "Receiving message, seen={seen}...");
info!(context, "Receiving message, seen={}...", seen);
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(
@@ -109,7 +109,7 @@ pub(crate) async fn receive_imf_inner(
let mut mime_parser = match MimeMessage::from_bytes(context, imf_raw, is_partial_download).await
{
Err(err) => {
warn!(context, "receive_imf: can't parse MIME: {err:#}.");
warn!(context, "receive_imf: can't parse MIME: {:#}", err);
let msg_ids;
if !rfc724_mid.starts_with(GENERATED_PREFIX) {
let row_id = context
@@ -138,11 +138,11 @@ pub(crate) async fn receive_imf_inner(
// we can not add even an empty record if we have no info whatsoever
if !mime_parser.has_headers() {
warn!(context, "receive_imf: no headers found.");
warn!(context, "receive_imf: no headers found");
return Ok(None);
}
info!(context, "Received message has Message-Id: {rfc724_mid}");
info!(context, "received message has Message-Id: {}", rfc724_mid);
// check, if the mail is already in our database.
// make sure, this check is done eg. before securejoin-processing.
@@ -182,7 +182,7 @@ pub(crate) async fn receive_imf_inner(
None => {
warn!(
context,
"receive_imf: From field does not contain an acceptable address."
"receive_imf: From field does not contain an acceptable address"
);
return Ok(None);
}
@@ -269,13 +269,13 @@ pub(crate) async fn receive_imf_inner(
if from_id == ContactId::SELF {
if mime_parser.was_encrypted() {
if let Err(err) = context.execute_sync_items(sync_items).await {
warn!(context, "receive_imf cannot execute sync items: {err:#}.");
warn!(context, "receive_imf cannot execute sync items: {:#}", err);
}
} else {
warn!(context, "Sync items are not encrypted.");
warn!(context, "sync items are not encrypted.");
}
} else {
warn!(context, "Sync items not sent by self.");
warn!(context, "sync items not sent by self.");
}
}
@@ -284,7 +284,7 @@ pub(crate) async fn receive_imf_inner(
.receive_status_update(from_id, insert_msg_id, status_update)
.await
{
warn!(context, "receive_imf cannot update status: {err:#}.");
warn!(context, "receive_imf cannot update status: {:#}", err);
}
}
@@ -302,7 +302,10 @@ pub(crate) async fn receive_imf_inner(
)
.await
{
warn!(context, "receive_imf cannot update profile image: {err:#}.");
warn!(
context,
"receive_imf cannot update profile image: {:#}", err
);
};
}
}
@@ -328,7 +331,7 @@ pub(crate) async fn receive_imf_inner(
)
.await
{
warn!(context, "Cannot update contact status: {err:#}.");
warn!(context, "cannot update contact status: {:#}", err);
}
}
@@ -391,7 +394,7 @@ pub async fn from_field_to_contact_id(
Err(err) => {
warn!(
context,
"Cannot create a contact for the given From field: {err:#}."
"Cannot create a contact for the given From field: {:#}.", err
);
return Ok(None);
}
@@ -473,7 +476,7 @@ async fn add_parts(
// this message is a classic email not a chat-message nor a reply to one
match show_emails {
ShowEmails::Off => {
info!(context, "Classical email not shown (TRASH).");
info!(context, "Classical email not shown (TRASH)");
chat_id = Some(DC_CHAT_ID_TRASH);
allow_creation = false;
}
@@ -516,7 +519,7 @@ async fn add_parts(
securejoin_seen = false;
}
Err(err) => {
warn!(context, "Error in Secure-Join message handling: {err:#}.");
warn!(context, "Error in Secure-Join message handling: {:#}", err);
chat_id = Some(DC_CHAT_ID_TRASH);
securejoin_seen = true;
}
@@ -533,8 +536,7 @@ async fn add_parts(
if chat_id.is_none() && mime_parser.delivery_report.is_some() {
chat_id = Some(DC_CHAT_ID_TRASH);
info!(context, "Message is a DSN (TRASH).",);
markseen_on_imap_table(context, rfc724_mid).await.ok();
info!(context, "Message is a DSN (TRASH)",);
}
if chat_id.is_none() {
@@ -752,7 +754,7 @@ async fn add_parts(
chat_id = None;
}
Err(err) => {
warn!(context, "Error in Secure-Join watching: {err:#}.");
warn!(context, "Error in Secure-Join watching: {:#}", err);
chat_id = Some(DC_CHAT_ID_TRASH);
}
}
@@ -769,7 +771,7 @@ async fn add_parts(
if is_draft {
// Most mailboxes have a "Drafts" folder where constantly new emails appear but we don't actually want to show them
info!(context, "Email is probably just a draft (TRASH).");
info!(context, "Email is probably just a draft (TRASH)");
chat_id = Some(DC_CHAT_ID_TRASH);
}
@@ -861,15 +863,14 @@ async fn add_parts(
if fetching_existing_messages && mime_parser.decrypting_failed {
chat_id = Some(DC_CHAT_ID_TRASH);
// We are only gathering old messages on first start. We do not want to add loads of non-decryptable messages to the chats.
info!(context, "Existing non-decipherable message (TRASH).");
info!(context, "Existing non-decipherable message. (TRASH)");
}
if mime_parser.webxdc_status_update.is_some() && mime_parser.parts.len() == 1 {
if let Some(part) = mime_parser.parts.first() {
if part.typ == Viewtype::Text && part.msg.is_empty() {
chat_id = Some(DC_CHAT_ID_TRASH);
info!(context, "Message is a status update only (TRASH).");
markseen_on_imap_table(context, rfc724_mid).await.ok();
info!(context, "Message is a status update only (TRASH)");
}
}
}
@@ -879,7 +880,7 @@ async fn add_parts(
DC_CHAT_ID_TRASH
} else {
chat_id.unwrap_or_else(|| {
info!(context, "No chat id for message (TRASH).");
info!(context, "No chat id for message (TRASH)");
DC_CHAT_ID_TRASH
})
};
@@ -891,7 +892,10 @@ async fn add_parts(
match value.parse::<EphemeralTimer>() {
Ok(timer) => timer,
Err(err) => {
warn!(context, "Can't parse ephemeral timer \"{value}\": {err:#}.");
warn!(
context,
"can't parse ephemeral timer \"{}\": {:#}", value, err
);
EphemeralTimer::Disabled
}
}
@@ -911,7 +915,12 @@ async fn add_parts(
&& !mime_parser.parts.is_empty()
&& chat_id.get_ephemeral_timer(context).await? != ephemeral_timer
{
info!(context, "Received new ephemeral timer value {ephemeral_timer:?} for chat {chat_id}, checking if it should be applied.");
info!(
context,
"received new ephemeral timer value {:?} for chat {}, checking if it should be applied",
ephemeral_timer,
chat_id
);
if is_dc_message == MessengerMessage::Yes
&& get_previous_message(context, mime_parser)
.await?
@@ -927,7 +936,9 @@ async fn add_parts(
// have seen or sent ourselves, so we ignore incoming timer to prevent a rollback.
warn!(
context,
"Ignoring ephemeral timer change to {ephemeral_timer:?} for chat {chat_id} to avoid rollback.",
"ignoring ephemeral timer change to {:?} for chat {} to avoid rollback",
ephemeral_timer,
chat_id
);
} else if chat_id
.update_timestamp(context, Param::EphemeralSettingsTimestamp, sent_timestamp)
@@ -939,12 +950,12 @@ async fn add_parts(
{
warn!(
context,
"Failed to modify timer for chat {chat_id}: {err:#}."
"failed to modify timer for chat {}: {:#}", chat_id, err
);
} else {
info!(
context,
"Updated ephemeral timer to {ephemeral_timer:?} for chat {chat_id}."
"updated ephemeral timer to {:?} for chat {}", ephemeral_timer, chat_id
);
if mime_parser.is_system_message != SystemMessage::EphemeralTimerChanged {
chat::add_info_msg(
@@ -959,7 +970,7 @@ async fn add_parts(
} else {
warn!(
context,
"Ignoring ephemeral timer change to {ephemeral_timer:?} because it is outdated."
"ignoring ephemeral timer change to {:?} because it's outdated", ephemeral_timer
);
}
}
@@ -988,7 +999,7 @@ async fn add_parts(
if chat.is_protected() || new_status.is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await
{
warn!(context, "Verification problem: {err:#}.");
warn!(context, "verification problem: {:#}", err);
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
} else {
@@ -1418,7 +1429,7 @@ async fn lookup_chat_by_reply(
info!(
context,
"Assigning message to {} as it's a reply to {}.", parent_chat.id, parent.rfc724_mid
"Assigning message to {} as it's a reply to {}", parent_chat.id, parent.rfc724_mid
);
return Ok(Some((parent_chat.id, parent_chat.blocked)));
}
@@ -1488,7 +1499,7 @@ async fn create_or_lookup_group(
.map(|chat_id| (chat_id, create_blocked));
return Ok(res);
} else {
info!(context, "Creating ad-hoc group prevented from caller.");
info!(context, "creating ad-hoc group prevented from caller");
return Ok(None);
};
@@ -1514,7 +1525,7 @@ async fn create_or_lookup_group(
let create_protected = if mime_parser.get_header(HeaderDef::ChatVerified).is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await {
warn!(context, "Verification problem: {err:#}.");
warn!(context, "verification problem: {:#}", err);
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
}
@@ -1546,7 +1557,7 @@ async fn create_or_lookup_group(
{
// Group does not exist but should be created.
if !allow_creation {
info!(context, "Creating group forbidden by caller.");
info!(context, "creating group forbidden by caller");
return Ok(None);
}
@@ -1607,7 +1618,7 @@ async fn create_or_lookup_group(
} else {
// The message was decrypted successfully, but contains a late "quit" or otherwise
// unwanted message.
info!(context, "Message belongs to unwanted group (TRASH).");
info!(context, "message belongs to unwanted group (TRASH)");
Ok(Some((DC_CHAT_ID_TRASH, Blocked::Not)))
}
}
@@ -1647,7 +1658,7 @@ async fn apply_group_changes(
Some(stock_str::msg_del_member(context, &removed_addr, from_id).await)
};
}
None => warn!(context, "Removed {removed_addr:?} has no contact_id."),
None => warn!(context, "removed {:?} has no contact_id", removed_addr),
}
} else {
removed_id = None;
@@ -1672,7 +1683,7 @@ async fn apply_group_changes(
.update_timestamp(context, Param::GroupNameTimestamp, sent_timestamp)
.await?
{
info!(context, "Updating grpname for chat {chat_id}.");
info!(context, "updating grpname for chat {}", chat_id);
context
.sql
.execute(
@@ -1712,7 +1723,7 @@ async fn apply_group_changes(
if mime_parser.get_header(HeaderDef::ChatVerified).is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await {
warn!(context, "Verification problem: {err:#}.");
warn!(context, "verification problem: {:#}", err);
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
}
@@ -1732,7 +1743,9 @@ async fn apply_group_changes(
{
warn!(
context,
"Contact {from_id} attempts to modify group chat {chat_id} member list without being a member."
"Contact {} attempts to modify group chat {} member list without being a member.",
from_id,
chat_id
);
} else if chat_id
.update_timestamp(context, Param::MemberListTimestamp, sent_timestamp)
@@ -1765,7 +1778,10 @@ async fn apply_group_changes(
}
members_to_add.dedup();
info!(context, "Adding {members_to_add:?} to chat id={chat_id}.");
info!(
context,
"adding {:?} to chat id={}", members_to_add, chat_id
);
chat::add_to_chat_contacts_table(context, chat_id, &members_to_add).await?;
send_event_chat_modified = true;
}
@@ -1775,15 +1791,17 @@ async fn apply_group_changes(
if !chat::is_contact_in_chat(context, chat_id, ContactId::SELF).await? {
warn!(
context,
"Received group avatar update for group chat {chat_id} we are not a member of."
"Received group avatar update for group chat {} we are not a member of.", chat_id
);
} else if !chat::is_contact_in_chat(context, chat_id, from_id).await? {
warn!(
context,
"Contact {from_id} attempts to modify group chat {chat_id} avatar without being a member.",
"Contact {} attempts to modify group chat {} avatar without being a member.",
from_id,
chat_id
);
} else {
info!(context, "Group-avatar change for {chat_id}.");
info!(context, "group-avatar change for {}", chat_id);
if chat
.param
.update_timestamp(Param::AvatarTimestamp, sent_timestamp)?
@@ -1917,7 +1935,7 @@ async fn create_or_lookup_mailinglist(
.await
.with_context(|| {
format!(
"failed to create mailinglist '{}' for grpid={}",
"Failed to create mailinglist '{}' for grpid={}",
&name, &listid
)
})?;
@@ -1925,7 +1943,7 @@ async fn create_or_lookup_mailinglist(
chat::add_to_chat_contacts_table(context, chat_id, &[ContactId::SELF]).await?;
Ok(Some((chat_id, blocked)))
} else {
info!(context, "Creating list forbidden by caller.");
info!(context, "creating list forbidden by caller");
Ok(None)
}
}
@@ -2017,7 +2035,7 @@ async fn create_adhoc_group(
if mime_parser.is_mailinglist_message() {
info!(
context,
"Not creating ad-hoc group for mailing list message."
"not creating ad-hoc group for mailing list message"
);
return Ok(None);
@@ -2034,13 +2052,13 @@ async fn create_adhoc_group(
// Instead, assign the message to 1:1 chat with the sender.
warn!(
context,
"Not creating ad-hoc group for message that cannot be decrypted."
"not creating ad-hoc group for message that cannot be decrypted"
);
return Ok(None);
}
if member_ids.len() < 3 {
info!(context, "Not creating ad-hoc group: too few contacts.");
info!(context, "not creating ad-hoc group: too few contacts");
return Ok(None);
}
@@ -2147,7 +2165,7 @@ async fn check_verified_properties(
for (to_addr, mut is_verified) in rows {
info!(
context,
"check_verified_properties: {:?} self={:?}.",
"check_verified_properties: {:?} self={:?}",
to_addr,
context.is_self_addr(&to_addr).await
);

View File

@@ -5,7 +5,7 @@ use anyhow::{bail, Context as _, Result};
use async_channel::{self as channel, Receiver, Sender};
use futures::future::try_join_all;
use futures_lite::FutureExt;
use tokio::sync::{oneshot, RwLock, RwLockWriteGuard};
use tokio::sync::{RwLock, RwLockWriteGuard};
use tokio::task;
use self::connectivity::ConnectivityStore;
@@ -13,7 +13,6 @@ use crate::config::Config;
use crate::contact::{ContactId, RecentlySeenLoop};
use crate::context::Context;
use crate::ephemeral::{self, delete_expired_imap_messages};
use crate::events::EventType;
use crate::imap::{FolderMeaning, Imap};
use crate::job;
use crate::location;
@@ -90,28 +89,20 @@ impl SchedulerState {
/// Pauses the IO scheduler.
///
/// If it is currently running the scheduler will be stopped. When the
/// [`IoPausedGuard`] is dropped the scheduler is started again.
/// If it is currently running the scheduler will be stopped. When
/// [`IoPausedGuard::resume`] is called the scheduler is started again.
///
/// If in the meantime [`SchedulerState::start`] or [`SchedulerState::stop`] is called
/// resume will do the right thing and restore the scheduler to the state requested by
/// the last call.
pub(crate) async fn pause<'a>(&'_ self, context: Context) -> IoPausedGuard {
{
let mut inner = self.inner.write().await;
inner.paused = true;
Self::do_stop(inner, &context).await;
let mut inner = self.inner.write().await;
inner.paused = true;
Self::do_stop(inner, &context).await;
IoPausedGuard {
context,
done: false,
}
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
rx.await.ok();
let mut inner = context.scheduler.inner.write().await;
inner.paused = false;
if inner.started && inner.scheduler.is_none() {
SchedulerState::do_start(inner, context.clone()).await;
}
});
IoPausedGuard { sender: Some(tx) }
}
/// Restarts the scheduler, only if it is running.
@@ -203,21 +194,31 @@ struct InnerSchedulerState {
paused: bool,
}
/// Guard to make sure the IO Scheduler is resumed.
///
/// Returned by [`SchedulerState::pause`]. To resume the IO scheduler simply drop this
/// guard.
#[derive(Debug)]
pub(crate) struct IoPausedGuard {
sender: Option<oneshot::Sender<()>>,
context: Context,
done: bool,
}
impl IoPausedGuard {
pub(crate) async fn resume(&mut self) {
self.done = true;
let mut inner = self.context.scheduler.inner.write().await;
inner.paused = false;
if inner.started && inner.scheduler.is_none() {
SchedulerState::do_start(inner, self.context.clone()).await;
}
}
}
impl Drop for IoPausedGuard {
fn drop(&mut self) {
if let Some(sender) = self.sender.take() {
// Can only fail if receiver is dropped, but then we're already resumed.
sender.send(()).ok();
if self.done {
return;
}
// Async .resume() should be called manually due to lack of async drop.
error!(self.context, "Pause guard dropped without resuming.");
}
}
@@ -478,7 +479,6 @@ async fn fetch_idle(
connection.connectivity.set_connected(ctx).await;
ctx.emit_event(EventType::ImapInboxIdle);
if let Some(session) = connection.session.take() {
if !session.can_idle() {
info!(

View File

@@ -552,7 +552,7 @@ pub(crate) async fn send_msg_to_smtp(
}
info!(
context,
"Try number {retries} to send message {msg_id} (entry {rowid}) over SMTP"
"Try number {} to send message {} over SMTP", retries, msg_id
);
let recipients_list = recipients
@@ -576,13 +576,8 @@ pub(crate) async fn send_msg_to_smtp(
{
info!(
context,
"Sending of message {msg_id} (entry {rowid}) was cancelled by the user."
"Sending of message {} was cancelled by the user.", msg_id
);
context
.sql
.execute("DELETE FROM smtp WHERE id=?", (rowid,))
.await
.context("failed to remove cancelled message from smtp table")?;
return Ok(());
}
@@ -654,8 +649,6 @@ pub(crate) async fn send_smtp_messages(context: &Context, connection: &mut Smtp)
},
)
.await?;
info!(context, "Selected rows from SMTP queue: {rowids:?}.");
for rowid in rowids {
send_msg_to_smtp(context, connection, rowid)
.await

View File

@@ -64,11 +64,9 @@ impl Smtp {
if let Some(ref mut transport) = self.transport {
transport.send(mail).await.map_err(Error::SmtpSend)?;
let info_msg = format!(
"Message len={message_len_bytes} was SMTP-sent to {recipients_display}"
);
info!(context, "{info_msg}.");
context.emit_event(EventType::SmtpMessageSent(info_msg));
context.emit_event(EventType::SmtpMessageSent(format!(
"Message len={message_len_bytes} was smtp-sent to {recipients_display}"
)));
self.last_success = Some(std::time::SystemTime::now());
} else {
warn!(

View File

@@ -699,26 +699,26 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
if let Err(err) = remove_unused_files(context).await {
warn!(
context,
"Housekeeping: cannot remove unused files: {:#}.", err
"Housekeeping: cannot remove unused files: {:#}", err
);
}
if let Err(err) = start_ephemeral_timers(context).await {
warn!(
context,
"Housekeeping: cannot start ephemeral timers: {:#}.", err
"Housekeeping: cannot start ephemeral timers: {:#}", err
);
}
if let Err(err) = prune_tombstones(&context.sql).await {
warn!(
context,
"Housekeeping: Cannot prune message tombstones: {:#}.", err
"Housekeeping: Cannot prune message tombstones: {:#}", err
);
}
if let Err(err) = deduplicate_peerstates(&context.sql).await {
warn!(context, "Failed to deduplicate peerstates: {:#}.", err)
warn!(context, "Failed to deduplicate peerstates: {:#}", err)
}
context.schedule_quota_update().await?;
@@ -731,7 +731,7 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
.await
{
Err(err) => {
warn!(context, "Failed to run incremental vacuum: {err:#}.");
warn!(context, "Failed to run incremental vacuum: {err:#}");
}
Ok(Some(())) => {
// Incremental vacuum returns a zero-column result if it did anything.
@@ -747,7 +747,7 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
.set_config(Config::LastHousekeeping, Some(&time().to_string()))
.await
{
warn!(context, "Can't set config: {e:#}.");
warn!(context, "Can't set config: {}", e);
}
context
@@ -814,7 +814,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
.await
.context("housekeeping: failed to SELECT value FROM config")?;
info!(context, "{} files in use.", files_in_use.len());
info!(context, "{} files in use.", files_in_use.len(),);
/* go through directories and delete unused files */
let blobdir = context.get_blobdir();
for p in [&blobdir.join(BLOBS_BACKUP_NAME), blobdir] {
@@ -846,7 +846,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
// environment f.e. So, no warning.
info!(
context,
"Housekeeping: Cannot rmdir {}: {:#}.",
"Housekeeping: Cannot rmdir {}: {:#}",
entry.path().display(),
e
);
@@ -868,7 +868,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
{
info!(
context,
"Housekeeping: Keeping new unreferenced file #{}: {:?}.",
"Housekeeping: Keeping new unreferenced file #{}: {:?}",
unreferenced_count,
entry.file_name(),
);
@@ -879,7 +879,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
}
info!(
context,
"Housekeeping: Deleting unreferenced file #{}: {:?}.",
"Housekeeping: Deleting unreferenced file #{}: {:?}",
unreferenced_count,
entry.file_name()
);
@@ -897,7 +897,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
Err(err) => {
warn!(
context,
"Housekeeping: Cannot read dir {}: {:#}.",
"Housekeeping: Cannot read dir {}: {:#}",
p.display(),
err
);

View File

@@ -407,9 +407,6 @@ pub enum StockMessage {
#[strum(props(fallback = "Scan to set up second device for %1$s"))]
BackupTransferQr = 162,
#[strum(props(fallback = " Account transferred to your second device."))]
BackupTransferMsgBody = 163,
}
impl StockMessage {
@@ -1264,10 +1261,6 @@ pub(crate) async fn backup_transfer_qr(context: &Context) -> Result<String> {
.replace1(&full_name))
}
/// Returns the translated [`StockMessage::BackupTransferMsgBody`] string
/// (fallback: " Account transferred to your second device.").
pub(crate) async fn backup_transfer_msg_body(context: &Context) -> String {
    let body = translated(context, StockMessage::BackupTransferMsgBody);
    body.await
}
impl Context {
/// Set the stock string for the [StockMessage].
///

View File

@@ -165,19 +165,6 @@ pub(crate) struct StatusUpdateItemAndSerial {
max_serial: StatusUpdateSerial,
}
/// Looks up a ZIP entry by file name.
///
/// Returns the entry's index within the archive together with a
/// reference to the entry, or `None` if no entry with the given
/// name exists.
fn find_zip_entry<'a>(
    file: &'a async_zip::ZipFile,
    name: &str,
) -> Option<(usize, &'a async_zip::StoredZipEntry)> {
    file.entries()
        .iter()
        .enumerate()
        .find(|(_, ent)| ent.entry().filename() == name)
}
impl Context {
/// check if a file is an acceptable webxdc for sending or receiving.
pub(crate) async fn is_webxdc_file(&self, filename: &str, file: &[u8]) -> Result<bool> {
@@ -193,7 +180,7 @@ impl Context {
return Ok(false);
}
let archive = match async_zip::read::mem::ZipFileReader::new(file.to_vec()).await {
let archive = match async_zip::read::mem::ZipFileReader::new(file).await {
Ok(archive) => archive,
Err(_) => {
info!(self, "{} cannot be opened as zip-file", &filename);
@@ -201,7 +188,7 @@ impl Context {
}
};
if find_zip_entry(archive.file(), "index.html").is_none() {
if archive.entry("index.html").is_none() {
info!(self, "{} misses index.html", &filename);
return Ok(false);
}
@@ -228,7 +215,7 @@ impl Context {
let valid = match async_zip::read::fs::ZipFileReader::new(path).await {
Ok(archive) => {
if find_zip_entry(archive.file(), "index.html").is_none() {
if archive.entry("index.html").is_none() {
info!(self, "{} misses index.html", filename);
false
} else {
@@ -662,9 +649,10 @@ fn parse_webxdc_manifest(bytes: &[u8]) -> Result<WebxdcManifest> {
}
async fn get_blob(archive: &mut async_zip::read::fs::ZipFileReader, name: &str) -> Result<Vec<u8>> {
let (i, _) = find_zip_entry(archive.file(), name)
let (i, _) = archive
.entry(name)
.ok_or_else(|| anyhow!("no entry found for {}", name))?;
let mut reader = archive.entry(i).await?;
let mut reader = archive.entry_reader(i).await?;
let mut buf = Vec::new();
reader.read_to_end(&mut buf).await?;
Ok(buf)
@@ -766,9 +754,9 @@ impl Message {
} else {
self.get_filename().unwrap_or_default()
},
icon: if find_zip_entry(archive.file(), "icon.png").is_some() {
icon: if archive.entry("icon.png").is_some() {
"icon.png".to_string()
} else if find_zip_entry(archive.file(), "icon.jpg").is_some() {
} else if archive.entry("icon.jpg").is_some() {
"icon.jpg".to_string()
} else {
WEBXDC_DEFAULT_ICON.to_string()