Compare commits

..

1 Commits

Author SHA1 Message Date
Floris Bruynooghe
4f93a3890a fix(imex): Transfer large collection of blobs
There was a bug in iroh when transferring a large collection of blobs:
the entire collection index was not transferred due to buffer issues.
2023-04-04 10:51:42 +02:00
48 changed files with 778 additions and 1594 deletions

View File

@@ -16,11 +16,11 @@ env:
RUSTFLAGS: -Dwarnings
jobs:
lint_rust:
name: Lint Rust
lint:
name: Rustfmt and Clippy
runs-on: ubuntu-latest
env:
RUSTUP_TOOLCHAIN: 1.68.2
RUSTUP_TOOLCHAIN: 1.68.0
steps:
- uses: actions/checkout@v3
- name: Install rustfmt and clippy
@@ -31,8 +31,6 @@ jobs:
run: cargo fmt --all -- --check
- name: Run clippy
run: scripts/clippy.sh
- name: Check
run: cargo check --workspace --all-targets --all-features
cargo_deny:
name: cargo deny
@@ -66,28 +64,34 @@ jobs:
- name: Rustdoc
run: cargo doc --document-private-items --no-deps
rust_tests:
name: Rust tests
build_and_test:
name: Build and test
strategy:
fail-fast: false
matrix:
include:
# Currently used Rust version.
- os: ubuntu-latest
rust: 1.68.2
rust: 1.68.0
python: 3.9
- os: windows-latest
rust: 1.68.2
rust: 1.68.0
python: false # Python bindings compilation on Windows is not supported.
- os: macos-latest
rust: 1.68.2
rust: 1.68.0
python: 3.9
# Minimum Supported Rust Version = 1.65.0
# Minimum Supported Rust Version = 1.64.0
#
# Minimum Supported Python Version = 3.7
# This is the minimum version for which manylinux Python wheels are
# built.
- os: ubuntu-latest
rust: 1.65.0
rust: 1.64.0
python: 3.7
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@master
- name: Install Rust ${{ matrix.rust }}
run: rustup toolchain install --profile minimal ${{ matrix.rust }}
@@ -96,176 +100,64 @@ jobs:
- name: Cache rust cargo artifacts
uses: swatinem/rust-cache@v2
- name: Check
run: cargo check --workspace --bins --examples --tests --benches
- name: Tests
run: cargo test --workspace
- name: Test cargo vendor
run: cargo vendor
c_library:
name: Build C library
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- name: Cache rust cargo artifacts
uses: swatinem/rust-cache@v2
- name: Build C library
run: cargo build -p deltachat_ffi --features jsonrpc
- name: Upload C library
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.os }}-${{matrix.rust}}-libdeltachat.a
path: target/debug/libdeltachat.a
retention-days: 1
rpc_server:
name: Build deltachat-rpc-server
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- name: Cache rust cargo artifacts
uses: swatinem/rust-cache@v2
- name: Build deltachat-rpc-server
run: cargo build -p deltachat-rpc-server
- name: Upload deltachat-rpc-server
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.os }}-${{matrix.rust}}-deltachat-rpc-server
path: target/debug/deltachat-rpc-server
retention-days: 1
python_lint:
name: Python lint
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Install tox
run: pip install tox
- name: Lint Python bindings
working-directory: python
run: tox -e lint
- name: Lint deltachat-rpc-client
working-directory: deltachat-rpc-client
run: tox -e lint
python_tests:
name: Python tests
needs: ["c_library", "python_lint"]
strategy:
fail-fast: false
matrix:
include:
# Currently used Rust version.
- os: ubuntu-latest
python: 3.11
- os: macos-latest
python: 3.11
# PyPy tests
- os: ubuntu-latest
python: pypy3.9
- os: macos-latest
python: pypy3.9
# Minimum Supported Python Version = 3.7
# This is the minimum version for which manylinux Python wheels are
# built. Test it with minimum supported Rust version.
- os: ubuntu-latest
python: 3.7
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- name: Download libdeltachat.a
uses: actions/download-artifact@v3
with:
name: ${{ matrix.os }}-${{matrix.rust}}-libdeltachat.a
path: target/debug
- name: Install python
if: ${{ matrix.python }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Install tox
if: ${{ matrix.python }}
run: pip install tox
- name: Build C library
if: ${{ matrix.python }}
run: cargo build -p deltachat_ffi --features jsonrpc
- name: Run python tests
if: ${{ matrix.python }}
env:
DCC_NEW_TMP_EMAIL: ${{ secrets.DCC_NEW_TMP_EMAIL }}
DCC_RS_TARGET: debug
DCC_RS_DEV: ${{ github.workspace }}
working-directory: python
run: tox -e mypy,doc,py
run: tox -e lint,mypy,doc,py3
async_python_tests:
name: Async Python tests
needs: ["python_lint", "rpc_server"]
strategy:
fail-fast: false
matrix:
include:
# Currently used Rust version.
- os: ubuntu-latest
python: 3.11
- os: macos-latest
python: 3.11
# PyPy tests
- os: ubuntu-latest
python: pypy3.9
- os: macos-latest
python: pypy3.9
# Minimum Supported Python Version = 3.7
# This is the minimum version for which manylinux Python wheels are
# built. Test it with minimum supported Rust version.
- os: ubuntu-latest
python: 3.7
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- name: Install python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Install tox
run: pip install tox
- name: Download deltachat-rpc-server
uses: actions/download-artifact@v3
with:
name: ${{ matrix.os }}-${{matrix.rust}}-deltachat-rpc-server
path: target/debug
- name: Make deltachat-rpc-server executable
run: chmod +x target/debug/deltachat-rpc-server
- name: Build deltachat-rpc-server
if: ${{ matrix.python }}
run: cargo build -p deltachat-rpc-server
- name: Add deltachat-rpc-server to path
if: ${{ matrix.python }}
run: echo ${{ github.workspace }}/target/debug >> $GITHUB_PATH
- name: Run deltachat-rpc-client tests
if: ${{ matrix.python }}
env:
DCC_NEW_TMP_EMAIL: ${{ secrets.DCC_NEW_TMP_EMAIL }}
working-directory: deltachat-rpc-client
run: tox -e py
run: tox -e py3,lint
- name: Install pypy
if: ${{ matrix.python }}
uses: actions/setup-python@v4
with:
python-version: "pypy${{ matrix.python }}"
- name: Run pypy tests
if: ${{ matrix.python }}
env:
DCC_NEW_TMP_EMAIL: ${{ secrets.DCC_NEW_TMP_EMAIL }}
DCC_RS_TARGET: debug
DCC_RS_DEV: ${{ github.workspace }}
working-directory: python
run: tox -e pypy3

View File

@@ -15,7 +15,7 @@ jobs:
# Build a version statically linked against musl libc
# to avoid problems with glibc version incompatibility.
build_linux:
name: Cross-compile deltachat-rpc-server for x86_64, i686, aarch64 and armv7 Linux
name: Cross-compile deltachat-rpc-server for x86_64, aarch64 and armv7 Linux
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
@@ -30,13 +30,6 @@ jobs:
path: target/x86_64-unknown-linux-musl/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload i686 binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-i686
path: target/i686-unknown-linux-musl/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload aarch64 binary
uses: actions/upload-artifact@v3
with:
@@ -97,7 +90,7 @@ jobs:
- name: Compose dist/ directory
run: |
mkdir dist
for x in x86_64 i686 aarch64 armv7 win32.exe win64.exe; do
for x in x86_64 aarch64 armv7 win32.exe win64.exe; do
mv "deltachat-rpc-server-$x"/* "dist/deltachat-rpc-server-$x"
done

View File

@@ -1,27 +1,5 @@
# Changelog
## [Unreleased]
### Changes
- Increase MSRV to 1.65.0. #4236
- Remove upper limit on the attachment size. #4253
- Update rPGP to 0.10.1. #4236
- Compress `mime_headers` column with HTML emails stored in database
### Fixes
- Fix python bindings README documentation on installing the bindings from source.
- Show a warning if quota list is empty #4261
## [1.112.6] - 2023-04-04
### Changes
- Add a device message after backup transfer #4301
### Fixed
- Updated `iroh` from 0.4.0 to 0.4.1 to fix transfer of large accounts with many blob files.
## [1.112.5] - 2023-04-02
### Fixes
@@ -42,8 +20,8 @@
### Changes
- Update iroh, remove `default-net` from `[patch.crates-io]` section.
- transfer backup: Connect to multiple provider addresses concurrently. This should speed up connection time significantly on the getter side. #4240
- Make sure BackupProvider is cancelled on drop (or `dc_backup_provider_unref`). The BackupProvider will now always finish with an IMEX event of 1000 or 0, previously it would sometimes finished with 1000 (success) when it really was 0 (failure). #4242
- transfer backup: Connect to multiple provider addresses concurrently. This should speed up connection time significantly on the getter side. #4240
- Make sure BackupProvider is cancelled on drop (or dc_backup_provider_unref). The BackupProvider will now always finish with an IMEX event of 1000 or 0, previously it would sometimes finish with 1000 (success) when it really was 0 (failure). #4242
### Fixes
- Do not return media from trashed messages in the "All media" view. #4247
@@ -2378,7 +2356,6 @@ For a full list of changes, please see our closed Pull Requests:
https://github.com/deltachat/deltachat-core-rust/pulls?q=is%3Apr+is%3Aclosed
[unreleased]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.5...HEAD
[1.111.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.110.0...v1.111.0
[1.112.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.111.0...v1.112.0
[1.112.1]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.0...v1.112.1

885
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,9 @@
[package]
name = "deltachat"
version = "1.112.6"
version = "1.112.5"
edition = "2021"
license = "MPL-2.0"
rust-version = "1.65"
rust-version = "1.64"
[profile.dev]
debug = 0
@@ -35,13 +35,13 @@ ratelimit = { path = "./deltachat-ratelimit" }
anyhow = "1"
async-channel = "1.8.0"
async-imap = { version = "0.7.0", default-features = false, features = ["runtime-tokio"] }
async-imap = { git = "https://github.com/async-email/async-imap", branch = "master", default-features = false, features = ["runtime-tokio"] }
async-native-tls = { version = "0.5", default-features = false, features = ["runtime-tokio"] }
async-smtp = { version = "0.9", default-features = false, features = ["runtime-tokio"] }
async_zip = { version = "0.0.12", default-features = false, features = ["deflate", "fs"] }
async_zip = { version = "0.0.11", default-features = false, features = ["deflate", "fs"] }
backtrace = "0.3"
base64 = "0.21"
brotli = "3.3"
bitflags = "1.3"
chrono = { version = "0.4", default-features=false, features = ["clock", "std"] }
email = { git = "https://github.com/deltachat/rust-email", branch = "master" }
encoded-words = { git = "https://github.com/async-email/encoded-words", branch = "master" }
@@ -51,11 +51,7 @@ futures = "0.3"
futures-lite = "1.12.0"
hex = "0.4.0"
humansize = "2"
image = { version = "0.24.6", default-features=false, features = ["gif", "jpeg", "ico", "png", "pnm", "webp", "bmp"] }
iroh = { version = "0.4.1", default-features = false }
kamadak-exif = "0.5"
lettre_email = { git = "https://github.com/deltachat/lettre", branch = "master" }
@@ -67,14 +63,14 @@ num-traits = "0.2"
once_cell = "1.17.0"
percent-encoding = "2.2"
parking_lot = "0.12"
pgp = { version = "0.10", default-features = false }
pgp = { version = "0.9", default-features = false }
pretty_env_logger = { version = "0.4", optional = true }
qrcodegen = "1.7.0"
quick-xml = "0.28"
quick-xml = "0.27"
rand = "0.8"
regex = "1.7"
reqwest = { version = "0.11.16", features = ["json"] }
rusqlite = { version = "0.29", features = ["sqlcipher"] }
reqwest = { version = "0.11.14", features = ["json"] }
rusqlite = { version = "0.28", features = ["sqlcipher"] }
rust-hsluv = "0.1"
sanitize-filename = "0.4"
serde_json = "1.0"
@@ -105,7 +101,7 @@ log = "0.4"
pretty_env_logger = "0.4"
proptest = { version = "1", default-features = false, features = ["std"] }
tempfile = "3"
testdir = "0.7.3"
testdir = "0.7.2"
tokio = { version = "1", features = ["parking_lot", "rt-multi-thread", "macros"] }
[workspace]

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat_ffi"
version = "1.112.6"
version = "1.112.5"
description = "Deltachat FFI"
edition = "2018"
readme = "README.md"

View File

@@ -7069,11 +7069,6 @@ void dc_event_unref(dc_event_t* event);
/// `%1$s` will be replaced by name and address of the account.
#define DC_STR_BACKUP_TRANSFER_QR 162
/// "Account transferred to your second device."
///
/// Used as a device message after a successful backup transfer.
#define DC_STR_BACKUP_TRANSFER_MSG_BODY 163
/**
* @}
*/

View File

@@ -160,8 +160,7 @@ pub unsafe extern "C" fn dc_context_open(
let ctx = &*context;
let passphrase = to_string_lossy(passphrase);
block_on(ctx.open(passphrase))
.context("dc_context_open() failed")
.log_err(ctx)
.log_err(ctx, "dc_context_open() failed")
.map(|b| b as libc::c_int)
.unwrap_or(0)
}
@@ -217,18 +216,16 @@ pub unsafe extern "C" fn dc_set_config(
if key.starts_with("ui.") {
ctx.set_ui_config(&key, value.as_deref())
.await
.with_context(|| format!("dc_set_config failed: Can't set {key} to {value:?}"))
.log_err(ctx)
.with_context(|| format!("Can't set {key} to {value:?}"))
.log_err(ctx, "dc_set_config() failed")
.is_ok() as libc::c_int
} else {
match config::Config::from_str(&key) {
Ok(key) => ctx
.set_config(key, value.as_deref())
.await
.with_context(|| {
format!("dc_set_config() failed: Can't set {key} to {value:?}")
})
.log_err(ctx)
.with_context(|| format!("Can't set {key} to {value:?}"))
.log_err(ctx, "dc_set_config() failed")
.is_ok() as libc::c_int,
Err(_) => {
warn!(ctx, "dc_set_config(): invalid key");
@@ -256,8 +253,7 @@ pub unsafe extern "C" fn dc_get_config(
if key.starts_with("ui.") {
ctx.get_ui_config(&key)
.await
.context("Can't get ui-config")
.log_err(ctx)
.log_err(ctx, "Can't get ui-config")
.unwrap_or_default()
.unwrap_or_default()
.strdup()
@@ -266,8 +262,7 @@ pub unsafe extern "C" fn dc_get_config(
Ok(key) => ctx
.get_config(key)
.await
.context("Can't get config")
.log_err(ctx)
.log_err(ctx, "Can't get config")
.unwrap_or_default()
.unwrap_or_default()
.strdup(),
@@ -419,8 +414,7 @@ pub unsafe extern "C" fn dc_get_oauth2_url(
block_on(async move {
match oauth2::get_oauth2_url(ctx, &addr, &redirect)
.await
.context("dc_get_oauth2_url failed")
.log_err(ctx)
.log_err(ctx, "dc_get_oauth2_url failed")
{
Ok(Some(res)) => res.strdup(),
Ok(None) | Err(_) => ptr::null_mut(),
@@ -429,12 +423,7 @@ pub unsafe extern "C" fn dc_get_oauth2_url(
}
fn spawn_configure(ctx: Context) {
spawn(async move {
ctx.configure()
.await
.context("Configure failed")
.log_err(&ctx)
});
spawn(async move { ctx.configure().await.log_err(&ctx, "Configure failed") });
}
#[no_mangle]
@@ -459,8 +448,7 @@ pub unsafe extern "C" fn dc_is_configured(context: *mut dc_context_t) -> libc::c
block_on(async move {
ctx.is_configured()
.await
.context("failed to get configured state")
.log_err(ctx)
.log_err(ctx, "failed to get configured state")
.unwrap_or_default() as libc::c_int
})
}
@@ -802,8 +790,7 @@ pub unsafe extern "C" fn dc_preconfigure_keypair(
key::store_self_keypair(ctx, &keypair, key::KeyPairUse::Default).await?;
Ok::<_, anyhow::Error>(1)
})
.context("Failed to save keypair")
.log_err(ctx)
.log_err(ctx, "Failed to save keypair")
.unwrap_or(0)
}
@@ -830,8 +817,7 @@ pub unsafe extern "C" fn dc_get_chatlist(
block_on(async move {
match chatlist::Chatlist::try_load(ctx, flags as usize, qs.as_deref(), qi)
.await
.context("Failed to get chatlist")
.log_err(ctx)
.log_err(ctx, "Failed to get chatlist")
{
Ok(list) => {
let ffi_list = ChatlistWrapper { context, list };
@@ -856,8 +842,7 @@ pub unsafe extern "C" fn dc_create_chat_by_contact_id(
block_on(async move {
ChatId::create_for_contact(ctx, ContactId::new(contact_id))
.await
.context("Failed to create chat from contact_id")
.log_err(ctx)
.log_err(ctx, "Failed to create chat from contact_id")
.map(|id| id.to_u32())
.unwrap_or(0)
})
@@ -877,8 +862,7 @@ pub unsafe extern "C" fn dc_get_chat_id_by_contact_id(
block_on(async move {
ChatId::lookup_by_contact(ctx, ContactId::new(contact_id))
.await
.context("Failed to get chat for contact_id")
.log_err(ctx)
.log_err(ctx, "Failed to get chat for contact_id")
.unwrap_or_default() // unwraps the Result
.map(|id| id.to_u32())
.unwrap_or(0) // unwraps the Option
@@ -1020,8 +1004,7 @@ pub unsafe extern "C" fn dc_get_msg_reactions(
let ctx = &*context;
let reactions = if let Ok(reactions) = block_on(get_msg_reactions(ctx, MsgId::new(msg_id)))
.context("failed dc_get_msg_reactions() call")
.log_err(ctx)
.log_err(ctx, "failed dc_get_msg_reactions() call")
{
reactions
} else {
@@ -1049,8 +1032,7 @@ pub unsafe extern "C" fn dc_send_webxdc_status_update(
&to_string_lossy(json),
&to_string_lossy(descr),
))
.context("Failed to send webxdc update")
.log_err(ctx)
.log_err(ctx, "Failed to send webxdc update")
.is_ok() as libc::c_int
}
@@ -1269,8 +1251,7 @@ pub unsafe extern "C" fn dc_get_fresh_msgs(
let arr = dc_array_t::from(
ctx.get_fresh_msgs()
.await
.context("Failed to get fresh messages")
.log_err(ctx)
.log_err(ctx, "Failed to get fresh messages")
.unwrap_or_default()
.iter()
.map(|msg_id| msg_id.to_u32())
@@ -1291,8 +1272,7 @@ pub unsafe extern "C" fn dc_marknoticed_chat(context: *mut dc_context_t, chat_id
block_on(async move {
chat::marknoticed_chat(ctx, ChatId::new(chat_id))
.await
.context("Failed marknoticed chat")
.log_err(ctx)
.log_err(ctx, "Failed marknoticed chat")
.unwrap_or(())
})
}
@@ -1434,8 +1414,7 @@ pub unsafe extern "C" fn dc_set_chat_visibility(
ChatId::new(chat_id)
.set_visibility(ctx, visibility)
.await
.context("Failed setting chat visibility")
.log_err(ctx)
.log_err(ctx, "Failed setting chat visibility")
.unwrap_or(())
})
}
@@ -1452,9 +1431,7 @@ pub unsafe extern "C" fn dc_delete_chat(context: *mut dc_context_t, chat_id: u32
ChatId::new(chat_id)
.delete(ctx)
.await
.context("Failed chat delete")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Failed chat delete");
})
}
@@ -1470,9 +1447,7 @@ pub unsafe extern "C" fn dc_block_chat(context: *mut dc_context_t, chat_id: u32)
ChatId::new(chat_id)
.block(ctx)
.await
.context("Failed chat block")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Failed chat block");
})
}
@@ -1488,9 +1463,7 @@ pub unsafe extern "C" fn dc_accept_chat(context: *mut dc_context_t, chat_id: u32
ChatId::new(chat_id)
.accept(ctx)
.await
.context("Failed chat accept")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Failed chat accept");
})
}
@@ -1588,8 +1561,7 @@ pub unsafe extern "C" fn dc_create_group_chat(
block_on(async move {
chat::create_group_chat(ctx, protect, &to_string_lossy(name))
.await
.context("Failed to create group chat")
.log_err(ctx)
.log_err(ctx, "Failed to create group chat")
.map(|id| id.to_u32())
.unwrap_or(0)
})
@@ -1603,8 +1575,7 @@ pub unsafe extern "C" fn dc_create_broadcast_list(context: *mut dc_context_t) ->
}
let ctx = &*context;
block_on(chat::create_broadcast_list(ctx))
.context("Failed to create broadcast list")
.log_err(ctx)
.log_err(ctx, "Failed to create broadcast list")
.map(|id| id.to_u32())
.unwrap_or(0)
}
@@ -1626,8 +1597,7 @@ pub unsafe extern "C" fn dc_is_contact_in_chat(
ChatId::new(chat_id),
ContactId::new(contact_id),
))
.context("is_contact_in_chat failed")
.log_err(ctx)
.log_err(ctx, "is_contact_in_chat failed")
.unwrap_or_default() as libc::c_int
}
@@ -1648,8 +1618,7 @@ pub unsafe extern "C" fn dc_add_contact_to_chat(
ChatId::new(chat_id),
ContactId::new(contact_id),
))
.context("Failed to add contact")
.log_err(ctx)
.log_err(ctx, "Failed to add contact")
.is_ok() as libc::c_int
}
@@ -1670,8 +1639,7 @@ pub unsafe extern "C" fn dc_remove_contact_from_chat(
ChatId::new(chat_id),
ContactId::new(contact_id),
))
.context("Failed to remove contact")
.log_err(ctx)
.log_err(ctx, "Failed to remove contact")
.is_ok() as libc::c_int
}
@@ -1790,8 +1758,7 @@ pub unsafe extern "C" fn dc_get_chat_ephemeral_timer(
// ignored when ephemeral timer value is used to construct
// message headers.
block_on(async move { ChatId::new(chat_id).get_ephemeral_timer(ctx).await })
.context("Failed to get ephemeral timer")
.log_err(ctx)
.log_err(ctx, "Failed to get ephemeral timer")
.unwrap_or_default()
.to_u32()
}
@@ -1812,8 +1779,7 @@ pub unsafe extern "C" fn dc_set_chat_ephemeral_timer(
ChatId::new(chat_id)
.set_ephemeral_timer(ctx, EphemeralTimer::from_u32(timer))
.await
.context("Failed to set ephemeral timer")
.log_err(ctx)
.log_err(ctx, "Failed to set ephemeral timer")
.is_ok() as libc::c_int
})
}
@@ -1889,8 +1855,7 @@ pub unsafe extern "C" fn dc_delete_msgs(
let msg_ids = convert_and_prune_message_ids(msg_ids, msg_cnt);
block_on(message::delete_msgs(ctx, &msg_ids))
.context("failed dc_delete_msgs() call")
.log_err(ctx)
.log_err(ctx, "failed dc_delete_msgs() call")
.ok();
}
@@ -1954,8 +1919,7 @@ pub unsafe extern "C" fn dc_markseen_msgs(
let ctx = &*context;
block_on(message::markseen_msgs(ctx, msg_ids))
.context("failed dc_markseen_msgs() call")
.log_err(ctx)
.log_err(ctx, "failed dc_markseen_msgs() call")
.ok();
}
@@ -1997,8 +1961,7 @@ pub unsafe extern "C" fn dc_download_full_msg(context: *mut dc_context_t, msg_id
}
let ctx = &*context;
block_on(MsgId::new(msg_id).download_full(ctx))
.context("Failed to download message fully.")
.log_err(ctx)
.log_err(ctx, "Failed to download message fully.")
.ok();
}
@@ -2046,8 +2009,7 @@ pub unsafe extern "C" fn dc_create_contact(
let name = to_string_lossy(name);
block_on(Contact::create(ctx, &name, &to_string_lossy(addr)))
.context("Cannot create contact")
.log_err(ctx)
.log_err(ctx, "Cannot create contact")
.map(|id| id.to_u32())
.unwrap_or(0)
}
@@ -2124,8 +2086,7 @@ pub unsafe extern "C" fn dc_get_blocked_contacts(
Box::into_raw(Box::new(dc_array_t::from(
Contact::get_all_blocked(ctx)
.await
.context("Can't get blocked contacts")
.log_err(ctx)
.log_err(ctx, "Can't get blocked contacts")
.unwrap_or_default()
.iter()
.map(|id| id.to_u32())
@@ -2150,15 +2111,11 @@ pub unsafe extern "C" fn dc_block_contact(
if block == 0 {
Contact::unblock(ctx, contact_id)
.await
.context("Can't unblock contact")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Can't unblock contact");
} else {
Contact::block(ctx, contact_id)
.await
.context("Can't block contact")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Can't block contact");
}
});
}
@@ -2231,8 +2188,7 @@ fn spawn_imex(ctx: Context, what: imex::ImexMode, param1: String, passphrase: Op
spawn(async move {
imex::imex(&ctx, what, param1.as_ref(), passphrase)
.await
.context("IMEX failed")
.log_err(&ctx)
.log_err(&ctx, "IMEX failed")
});
}
@@ -2418,8 +2374,7 @@ pub unsafe extern "C" fn dc_join_securejoin(
securejoin::join_securejoin(ctx, &to_string_lossy(qr))
.await
.map(|chatid| chatid.to_u32())
.context("failed dc_join_securejoin() call")
.log_err(ctx)
.log_err(ctx, "failed dc_join_securejoin() call")
.unwrap_or_default()
})
}
@@ -2441,8 +2396,7 @@ pub unsafe extern "C" fn dc_send_locations_to_chat(
ChatId::new(chat_id),
seconds as i64,
))
.context("Failed dc_send_locations_to_chat()")
.log_err(ctx)
.log_err(ctx, "Failed dc_send_locations_to_chat()")
.ok();
}
@@ -2525,8 +2479,7 @@ pub unsafe extern "C" fn dc_delete_all_locations(context: *mut dc_context_t) {
block_on(async move {
location::delete_all(ctx)
.await
.context("Failed to delete locations")
.log_err(ctx)
.log_err(ctx, "Failed to delete locations")
.ok()
});
}
@@ -2810,8 +2763,7 @@ pub unsafe extern "C" fn dc_chatlist_get_summary(
.list
.get_summary(ctx, index, maybe_chat)
.await
.context("get_summary failed")
.log_err(ctx)
.log_err(ctx, "get_summary failed")
.unwrap_or_default();
Box::into_raw(Box::new(summary.into()))
})
@@ -2839,8 +2791,7 @@ pub unsafe extern "C" fn dc_chatlist_get_summary2(
msg_id,
None,
))
.context("get_summary2 failed")
.log_err(ctx)
.log_err(ctx, "get_summary2 failed")
.unwrap_or_default();
Box::into_raw(Box::new(summary.into()))
}
@@ -3023,8 +2974,7 @@ pub unsafe extern "C" fn dc_chat_can_send(chat: *mut dc_chat_t) -> libc::c_int {
let ffi_chat = &*chat;
let ctx = &*ffi_chat.context;
block_on(ffi_chat.chat.can_send(ctx))
.context("can_send failed")
.log_err(ctx)
.log_err(ctx, "can_send failed")
.unwrap_or_default() as libc::c_int
}
@@ -3469,8 +3419,7 @@ pub unsafe extern "C" fn dc_msg_get_summary(
let ctx = &*ffi_msg.context;
let summary = block_on(ffi_msg.message.get_summary(ctx, maybe_chat))
.context("dc_msg_get_summary failed")
.log_err(ctx)
.log_err(ctx, "dc_msg_get_summary failed")
.unwrap_or_default();
Box::into_raw(Box::new(summary.into()))
}
@@ -3488,8 +3437,7 @@ pub unsafe extern "C" fn dc_msg_get_summarytext(
let ctx = &*ffi_msg.context;
let summary = block_on(ffi_msg.message.get_summary(ctx, None))
.context("dc_msg_get_summarytext failed")
.log_err(ctx)
.log_err(ctx, "dc_msg_get_summarytext failed")
.unwrap_or_default();
match usize::try_from(approx_characters) {
Ok(chars) => summary.truncated_text(chars).strdup(),
@@ -3756,9 +3704,7 @@ pub unsafe extern "C" fn dc_msg_latefiling_mediasize(
.message
.latefiling_mediasize(ctx, width, height, duration)
})
.context("Cannot set media size")
.log_err(ctx)
.ok();
.ok_or_log_msg(ctx, "Cannot set media size");
}
#[no_mangle]
@@ -3797,8 +3743,7 @@ pub unsafe extern "C" fn dc_msg_set_quote(msg: *mut dc_msg_t, quote: *const dc_m
.message
.set_quote(&*ffi_msg.context, quote_msg)
.await
.context("failed to set quote")
.log_err(&*ffi_msg.context)
.log_err(&*ffi_msg.context, "failed to set quote")
.ok();
});
}
@@ -3829,8 +3774,7 @@ pub unsafe extern "C" fn dc_msg_get_quoted_msg(msg: *const dc_msg_t) -> *mut dc_
.message
.quoted_message(context)
.await
.context("failed to get quoted message")
.log_err(context)
.log_err(context, "failed to get quoted message")
.unwrap_or(None)
});
@@ -3853,8 +3797,7 @@ pub unsafe extern "C" fn dc_msg_get_parent(msg: *const dc_msg_t) -> *mut dc_msg_
.message
.parent(context)
.await
.context("failed to get parent message")
.log_err(context)
.log_err(context, "failed to get parent message")
.unwrap_or(None)
});
@@ -4045,8 +3988,7 @@ pub unsafe extern "C" fn dc_contact_is_verified(contact: *mut dc_contact_t) -> l
let ctx = &*ffi_contact.context;
block_on(ffi_contact.contact.is_verified(ctx))
.context("is_verified failed")
.log_err(ctx)
.log_err(ctx, "is_verified failed")
.unwrap_or_default() as libc::c_int
}
@@ -4061,8 +4003,7 @@ pub unsafe extern "C" fn dc_contact_get_verifier_addr(
let ffi_contact = &*contact;
let ctx = &*ffi_contact.context;
block_on(ffi_contact.contact.get_verifier_addr(ctx))
.context("failed to get verifier for contact")
.log_err(ctx)
.log_err(ctx, "failed to get verifier for contact")
.unwrap_or_default()
.strdup()
}
@@ -4076,8 +4017,7 @@ pub unsafe extern "C" fn dc_contact_get_verifier_id(contact: *mut dc_contact_t)
let ffi_contact = &*contact;
let ctx = &*ffi_contact.context;
let verifier_contact_id = block_on(ffi_contact.contact.get_verifier_id(ctx))
.context("failed to get verifier")
.log_err(ctx)
.log_err(ctx, "failed to get verifier")
.unwrap_or_default()
.unwrap_or_default();
@@ -4229,8 +4169,8 @@ pub unsafe extern "C" fn dc_backup_provider_new(
provider,
})
.map(|ffi_provider| Box::into_raw(Box::new(ffi_provider)))
.log_err(ctx, "BackupProvider failed")
.context("BackupProvider failed")
.log_err(ctx)
.set_last_error(ctx)
.unwrap_or(ptr::null_mut())
}
@@ -4246,8 +4186,8 @@ pub unsafe extern "C" fn dc_backup_provider_get_qr(
let ffi_provider = &*provider;
let ctx = &*ffi_provider.context;
deltachat::qr::format_backup(&ffi_provider.provider.qr())
.log_err(ctx, "BackupProvider get_qr failed")
.context("BackupProvider get_qr failed")
.log_err(ctx)
.set_last_error(ctx)
.unwrap_or_default()
.strdup()
@@ -4265,8 +4205,8 @@ pub unsafe extern "C" fn dc_backup_provider_get_qr_svg(
let ctx = &*ffi_provider.context;
let provider = &ffi_provider.provider;
block_on(generate_backup_qr(ctx, &provider.qr()))
.log_err(ctx, "BackupProvider get_qr_svg failed")
.context("BackupProvider get_qr_svg failed")
.log_err(ctx)
.set_last_error(ctx)
.unwrap_or_default()
.strdup()
@@ -4282,8 +4222,8 @@ pub unsafe extern "C" fn dc_backup_provider_wait(provider: *mut dc_backup_provid
let ctx = &*ffi_provider.context;
let provider = &mut ffi_provider.provider;
block_on(provider)
.log_err(ctx, "Failed to await BackupProvider")
.context("Failed to await BackupProvider")
.log_err(ctx)
.set_last_error(ctx)
.ok();
}
@@ -4315,16 +4255,16 @@ pub unsafe extern "C" fn dc_receive_backup(
// user from deallocating it by calling unref on the object while we are using it.
fn receive_backup(ctx: Context, qr_text: String) -> libc::c_int {
let qr = match block_on(qr::check_qr(&ctx, &qr_text))
.log_err(&ctx, "Invalid QR code")
.context("Invalid QR code")
.log_err(&ctx)
.set_last_error(&ctx)
{
Ok(qr) => qr,
Err(_) => return 0,
};
match block_on(imex::get_backup(&ctx, qr))
.log_err(&ctx, "Get backup failed")
.context("Get backup failed")
.log_err(&ctx)
.set_last_error(&ctx)
{
Ok(_) => 1,
@@ -4376,8 +4316,8 @@ where
///
/// ```no_compile
/// some_dc_rust_api_call_returning_result()
/// .log_err(&context, "My API call failed")
/// .context("My API call failed")
/// .log_err(&context)
/// .set_last_error(&context)
/// .unwrap_or_default()
/// ```
@@ -4464,8 +4404,7 @@ pub unsafe extern "C" fn dc_provider_new_from_email_with_dns(
let socks5_enabled = block_on(async move {
ctx.get_config_bool(config::Config::Socks5Enabled)
.await
.context("Can't get config")
.log_err(ctx)
.log_err(ctx, "Can't get config")
});
match socks5_enabled {

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-jsonrpc"
version = "1.112.6"
version = "1.112.5"
description = "DeltaChat JSON-RPC API"
edition = "2021"
default-run = "deltachat-jsonrpc-server"
@@ -19,21 +19,21 @@ serde = { version = "1.0", features = ["derive"] }
tempfile = "3.3.0"
log = "0.4"
async-channel = { version = "1.8.0" }
futures = { version = "0.3.28" }
serde_json = "1.0.95"
futures = { version = "0.3.26" }
serde_json = "1.0.91"
yerpc = { version = "0.4.3", features = ["anyhow_expose"] }
typescript-type-def = { version = "0.5.5", features = ["json_value"] }
tokio = { version = "1.27.0" }
tokio = { version = "1.25.0" }
sanitize-filename = "0.4"
walkdir = "2.3.3"
walkdir = "2.3.2"
base64 = "0.21"
# optional dependencies
axum = { version = "0.6.12", optional = true, features = ["ws"] }
axum = { version = "0.6.11", optional = true, features = ["ws"] }
env_logger = { version = "0.10.0", optional = true }
[dev-dependencies]
tokio = { version = "1.27.0", features = ["full", "rt-multi-thread"] }
tokio = { version = "1.25.0", features = ["full", "rt-multi-thread"] }
[features]

View File

@@ -1082,17 +1082,17 @@ impl CommandApi {
}
/// Search messages containing the given query string.
/// Searching can be done globally (chat_id=None) or in a specified chat only (chat_id set).
/// Searching can be done globally (chat_id=0) or in a specified chat only (chat_id set).
///
/// Global search results are typically displayed using dc_msg_get_summary(), chat
/// search results may just highlight the corresponding messages and present a
/// Global chat results are typically displayed using dc_msg_get_summary(), chat
/// search results may just highlight the corresponding messages and present a
/// prev/next button.
///
/// For the global search, the result is limited to 1000 messages,
/// this allows an incremental search done fast.
/// So, when getting exactly 1000 messages, the result actually may be truncated;
/// the UIs may display sth. like "1000+ messages found" in this case.
/// The chat search (if chat_id is set) is not limited.
/// For global search, result is limited to 1000 messages,
/// this allows incremental search done fast.
/// So, when getting exactly 1000 results, the result may be truncated;
/// the UIs may display sth. as "1000+ messages found" in this case.
/// Chat search (if a chat_id is set) is not limited.
async fn search_messages(
&self,
account_id: u32,

View File

@@ -55,5 +55,5 @@
},
"type": "module",
"types": "dist/deltachat.d.ts",
"version": "1.112.6"
"version": "1.112.5"
}

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-repl"
version = "1.112.6"
version = "1.112.5"
license = "MPL-2.0"
edition = "2021"
@@ -8,10 +8,10 @@ edition = "2021"
ansi_term = "0.12.1"
anyhow = "1"
deltachat = { path = "..", features = ["internals"]}
dirs = "5"
dirs = "4"
log = "0.4.16"
pretty_env_logger = "0.4"
rusqlite = "0.29"
rusqlite = "0.28"
rustyline = "11"
tokio = { version = "1", features = ["fs", "rt-multi-thread", "macros"] }

View File

@@ -563,7 +563,7 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
context.maybe_network().await;
}
"housekeeping" => {
sql::housekeeping(&context).await.log_err(&context).ok();
sql::housekeeping(&context).await.ok_or_log(&context);
}
"listchats" | "listarchived" | "chats" => {
let listflags = if arg0 == "listarchived" {

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-rpc-server"
version = "1.112.6"
version = "1.112.5"
description = "DeltaChat JSON-RPC server"
edition = "2021"
readme = "README.md"
@@ -17,9 +17,9 @@ anyhow = "1"
env_logger = { version = "0.10.0" }
futures-lite = "1.12.0"
log = "0.4"
serde_json = "1.0.95"
serde_json = "1.0.91"
serde = { version = "1.0", features = ["derive"] }
tokio = { version = "1.27.0", features = ["io-std"] }
tokio = { version = "1.25.0", features = ["io-std"] }
yerpc = { version = "0.4.0", features = ["anyhow_expose"] }
[features]

View File

@@ -8,5 +8,5 @@ license = "MPL-2.0"
proc-macro = true
[dependencies]
syn = "2"
syn = "1"
quote = "1"

View File

@@ -13,46 +13,34 @@ ignore = [
# when upgrading.
# Please keep this list alphabetically sorted.
skip = [
{ name = "base16ct", version = "0.1.1" },
{ name = "base64", version = "<0.21" },
{ name = "bitflags", version = "1.3.2" },
{ name = "block-buffer", version = "<0.10" },
{ name = "clap_lex", version = "0.2.4" },
{ name = "clap", version = "3.2.23" },
{ name = "clap_lex", version = "0.2.4" },
{ name = "convert_case", version = "0.4.0" },
{ name = "curve25519-dalek", version = "3.2.0" },
{ name = "darling", version = "<0.14" },
{ name = "darling_core", version = "<0.14" },
{ name = "darling_macro", version = "<0.14" },
{ name = "darling", version = "<0.14" },
{ name = "der", version = "0.6.1" },
{ name = "digest", version = "<0.10" },
{ name = "ed25519-dalek", version = "1.0.1" },
{ name = "ed25519", version = "1.5.3" },
{ name = "env_logger", version = "<0.10" },
{ name = "getrandom", version = "<0.2" },
{ name = "hermit-abi", version = "<0.3" },
{ name = "humantime", version = "<2.1" },
{ name = "idna", version = "<0.3" },
{ name = "libm", version = "0.1.4" },
{ name = "pem-rfc7468", version = "0.6.0" },
{ name = "pkcs8", version = "0.9.0" },
{ name = "nom", version = "<7.1" },
{ name = "quick-error", version = "<2.0" },
{ name = "rand", version = "<0.8" },
{ name = "rand_chacha", version = "<0.3" },
{ name = "rand_core", version = "<0.6" },
{ name = "rand", version = "<0.8" },
{ name = "redox_syscall", version = "0.2.16" },
{ name = "sec1", version = "0.3.0" },
{ name = "sha2", version = "<0.10" },
{ name = "signature", version = "1.6.4" },
{ name = "spin", version = "<0.9.6" },
{ name = "spki", version = "0.6.0" },
{ name = "syn", version = "1.0.109" },
{ name = "time", version = "<0.3" },
{ name = "version_check", version = "<0.9" },
{ name = "wasi", version = "<0.11" },
{ name = "windows-sys", version = "<0.45" },
{ name = "windows_aarch64_msvc", version = "<0.42" },
{ name = "windows_i686_gnu", version = "<0.42" },
{ name = "windows_i686_msvc", version = "<0.42" },
{ name = "windows-sys", version = "<0.45" },
{ name = "windows_x86_64_gnu", version = "<0.42" },
{ name = "windows_x86_64_msvc", version = "<0.42" },
]

View File

@@ -60,5 +60,5 @@
"test:mocha": "mocha -r esm node/test/test.js --growl --reporter=spec --bail --exit"
},
"types": "node/dist/index.d.ts",
"version": "1.112.6"
"version": "1.112.5"
}

View File

@@ -12,17 +12,17 @@ a low-level Chat/Contact/Message API to user interfaces and bots.
Installing pre-built packages (Linux-only)
==========================================
If you have a Linux system you may install the ``deltachat`` binary "wheel" packages
If you have a Linux system you may try to install the ``deltachat`` binary "wheel" packages
without any "build-from-source" steps.
Otherwise you need to `compile the Delta Chat bindings yourself`__.
__ sourceinstall_
We recommend to first create a fresh Python virtual environment
and activate it in your shell::
We recommend to first `install virtualenv <https://virtualenv.pypa.io/en/stable/installation.html>`_,
then create a fresh Python virtual environment and activate it in your shell::
python -m venv env
source env/bin/activate
virtualenv env # or: python -m venv
source env/bin/activate
Afterwards, invoking ``python`` or ``pip install`` only
modifies files in your ``env`` directory and leaves
@@ -40,14 +40,16 @@ To verify it worked::
Running tests
=============
Recommended way to run tests is using `scripts/run-python-test.sh`
script provided in the core repository.
Recommended way to run tests is using `tox <https://tox.wiki>`_.
After successful binding installation you can install tox
and run the tests::
This script compiles the library in debug mode and runs the tests using `tox`_.
By default it will run all "offline" tests and skip all functional
pip install tox
tox -e py3
This will run all "offline" tests and skip all functional
end-to-end tests that require accounts on real e-mail servers.
.. _`tox`: https://tox.wiki
.. _livetests:
Running "live" tests with temporary accounts
@@ -59,32 +61,13 @@ Please feel free to contact us through a github issue or by e-mail and we'll sen
export DCC_NEW_TMP_EMAIL=<URL you got from us>
With this account-creation setting, pytest runs create ephemeral e-mail accounts on the http://testrun.org server.
These accounts are removed automatically as they expire.
After setting the variable, either rerun `scripts/run-python-test.sh`
or run offline and online tests with `tox` directly::
With this account-creation setting, pytest runs create ephemeral e-mail accounts on the http://testrun.org server. These accounts exists only for one hour and then are removed completely.
One hour is enough to invoke pytest and run all offline and online tests::
tox -e py
tox -e py3
Each test run creates new accounts.
Developing the bindings
-----------------------
If you want to develop or debug the bindings,
you can create a testing development environment using `tox`::
tox -c python --devenv env
. env/bin/activate
Inside this environment the bindings are installed
in editable mode (as if installed with `python -m pip install -e`)
together with the testing dependencies like `pytest` and its plugins.
You can then edit the source code in the development tree
and quickly run `pytest` manually without waiting for `tox`
to recreating the virtual environment each time.
.. _sourceinstall:
Installing bindings from source
@@ -106,34 +89,20 @@ To install the Delta Chat Python bindings make sure you have Python3 installed.
E.g. on Debian-based systems `apt install python3 python3-pip
python3-venv` should give you a usable python installation.
First, build the core library::
Ensure you are in the deltachat-core-rust/python directory, create the
virtual environment with dependencies using tox
and activate it in your shell::
cargo build --release -p deltachat_ffi --features jsonrpc
`jsonrpc` feature is required even if not used by the bindings
because `deltachat.h` includes JSON-RPC functions unconditionally.
Create the virtual environment and activate it:
python -m venv env
cd python
tox --devenv env
source env/bin/activate
Build and install the bindings:
You should now be able to build the python bindings using the supplied script::
export DCC_RS_DEV="$PWD"
export DCC_RS_TARGET=release
python -m pip install python
python3 install_python_bindings.py
`DCC_RS_DEV` environment variable specifies the location of
the core development tree. If this variable is not set,
`libdeltachat` library and `deltachat.h` header are expected
to be installed system-wide.
When `DCC_RS_DEV` is set, `DCC_RS_TARGET` specifies
the build profile name to look up the artifacts
in the target directory.
In this case setting it can be skipped because
`DCC_RS_TARGET=release` is the default.
The core compilation and bindings building might take a while,
depending on the speed of your machine.
Building manylinux based wheels
===============================

View File

@@ -24,8 +24,6 @@ def test_echo_quit_plugin(acfactory, lp):
lp.sec("creating a temp account to contact the bot")
(ac1,) = acfactory.get_online_accounts(1)
botproc.await_resync()
lp.sec("sending a message to the bot")
bot_contact = ac1.create_contact(botproc.addr)
bot_chat = bot_contact.create_chat()
@@ -42,7 +40,7 @@ def test_echo_quit_plugin(acfactory, lp):
def test_group_tracking_plugin(acfactory, lp):
lp.sec("creating one group-tracking bot and two temp accounts")
botproc = acfactory.run_bot_process(group_tracking)
botproc = acfactory.run_bot_process(group_tracking, ffi=False)
ac1, ac2 = acfactory.get_online_accounts(2)
@@ -54,8 +52,6 @@ def test_group_tracking_plugin(acfactory, lp):
ac1.add_account_plugin(FFIEventLogger(ac1))
ac2.add_account_plugin(FFIEventLogger(ac2))
botproc.await_resync()
lp.sec("creating bot test group with bot")
bot_contact = ac1.create_contact(botproc.addr)
ch = ac1.create_group_chat("bot test group")

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
"""
setup a python binding development in-place install with cargo debug symbols.
"""
import os
import subprocess
import sys
if __name__ == "__main__":
target = os.environ.get("DCC_RS_TARGET")
if target is None:
os.environ["DCC_RS_TARGET"] = target = "debug"
if "DCC_RS_DEV" not in os.environ:
dn = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.environ["DCC_RS_DEV"] = dn
cmd = ["cargo", "build", "-p", "deltachat_ffi", "--features", "jsonrpc"]
if target == "release":
os.environ["CARGO_PROFILE_RELEASE_LTO"] = "on"
cmd.append("--release")
print("running:", " ".join(cmd))
subprocess.check_call(cmd)
subprocess.check_call("rm -rf build/ src/deltachat/*.so src/deltachat/*.dylib src/deltachat/*.dll", shell=True)
if len(sys.argv) <= 1 or sys.argv[1] != "onlybuild":
subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."])

View File

@@ -676,13 +676,6 @@ class BotProcess:
print("+++IGN:", line)
ignored.append(line)
def await_resync(self):
self.fnmatch_lines(
"""
*Resync: collected * message IDs in folder INBOX*
""",
)
@pytest.fixture()
def tmp_db_path(tmpdir):

View File

@@ -567,8 +567,7 @@ def test_moved_markseen(acfactory):
with ac2.direct_imap.idle() as idle2:
ac2.start_io()
ev = ac2._evtracker.get_matching("DC_EVENT_INCOMING_MSG|DC_EVENT_MSGS_CHANGED")
msg = ac2.get_message_by_id(ev.data2)
msg = ac2._evtracker.wait_next_incoming_message()
# Accept the contact request.
msg.chat.accept()

View File

@@ -43,7 +43,7 @@ deps =
pygments
restructuredtext_lint
commands =
black --quiet --check --diff setup.py src/deltachat examples/ tests/
black --quiet --check --diff setup.py install_python_bindings.py src/deltachat examples/ tests/
ruff src/deltachat tests/ examples/
rst-lint --encoding 'utf-8' README.rst

View File

@@ -12,7 +12,7 @@ export DCC_RS_DEV=`pwd`
cd python
cargo build -p deltachat_ffi --features jsonrpc
python install_python_bindings.py onlybuild
# remove and inhibit writing PYC files
rm -rf tests/__pycache__
@@ -22,4 +22,4 @@ export PYTHONDONTWRITEBYTECODE=1
# run python tests (tox invokes pytest to run tests in python/tests)
#TOX_PARALLEL_NO_SPINNER=1 tox -e lint,doc
tox -e lint
tox -e doc,py
tox -e doc,py3

View File

@@ -56,8 +56,7 @@ def update_package_json(relpath, newversion):
json_data = json.loads(f.read())
json_data["version"] = newversion
with open(p, "w") as f:
json.dump(json_data, f, sort_keys=True, indent=2)
f.write("\n")
f.write(json.dumps(json_data, sort_keys=True, indent=2))
def main():

View File

@@ -1,32 +0,0 @@
#!/usr/bin/env python
import subprocess
import sys
import os
def flag_filter(flag: str) -> bool:
if flag == "-lc":
return False
if flag == "-Wl,-melf_i386":
return False
if "self-contained" in flag and "crt" in flag:
return False
return True
def main():
args = [flag for flag in sys.argv[1:] if flag_filter(flag)]
zig_target = os.environ["ZIG_TARGET"]
zig_cpu = os.environ.get("ZIG_CPU")
if zig_cpu:
zig_cpu_args = ["-mcpu=" + zig_cpu]
args = [x for x in args if not x.startswith("-march")]
else:
zig_cpu_args = []
subprocess.run(
["zig", "cc", "-target", zig_target, *zig_cpu_args, *args], check=True
)
main()

View File

@@ -1,15 +1,12 @@
#!/bin/sh
#
# Build statically linked deltachat-rpc-server using zig.
# Build statically linked deltachat-rpc-server using cargo-zigbuild.
set -x
set -e
unset RUSTFLAGS
# Pin Rust version to avoid uncontrolled changes in the compiler and linker flags.
export RUSTUP_TOOLCHAIN=1.68.1
ZIG_VERSION=0.11.0-dev.2213+515e1c93e
# Download Zig
@@ -18,35 +15,9 @@ wget "https://ziglang.org/builds/zig-linux-x86_64-$ZIG_VERSION.tar.xz"
tar xf "zig-linux-x86_64-$ZIG_VERSION.tar.xz"
export PATH="$PWD/zig-linux-x86_64-$ZIG_VERSION:$PATH"
rustup target add i686-unknown-linux-musl
CC="$PWD/scripts/zig-cc" \
TARGET_CC="$PWD/scripts/zig-cc" \
CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_LINKER="$PWD/scripts/zig-cc" \
LD="$PWD/scripts/zig-cc" \
ZIG_TARGET="x86-linux-musl" \
cargo build --release --target i686-unknown-linux-musl -p deltachat-rpc-server --features vendored
cargo install cargo-zigbuild
rustup target add armv7-unknown-linux-musleabihf
CC="$PWD/scripts/zig-cc" \
TARGET_CC="$PWD/scripts/zig-cc" \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER="$PWD/scripts/zig-cc" \
LD="$PWD/scripts/zig-cc" \
ZIG_TARGET="arm-linux-musleabihf" \
ZIG_CPU="generic+v7a+vfp3-d32+thumb2-neon" \
cargo build --release --target armv7-unknown-linux-musleabihf -p deltachat-rpc-server --features vendored
rustup target add x86_64-unknown-linux-musl
CC="$PWD/scripts/zig-cc" \
TARGET_CC="$PWD/scripts/zig-cc" \
CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="$PWD/scripts/zig-cc" \
LD="$PWD/scripts/zig-cc" \
ZIG_TARGET="x86_64-linux-musl" \
cargo build --release --target x86_64-unknown-linux-musl -p deltachat-rpc-server --features vendored
rustup target add aarch64-unknown-linux-musl
CC="$PWD/scripts/zig-cc" \
TARGET_CC="$PWD/scripts/zig-cc" \
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER="$PWD/scripts/zig-cc" \
LD="$PWD/scripts/zig-cc" \
ZIG_TARGET="aarch64-linux-musl" \
cargo build --release --target aarch64-unknown-linux-musl -p deltachat-rpc-server --features vendored
for TARGET in x86_64-unknown-linux-musl aarch64-unknown-linux-musl armv7-unknown-linux-musleabihf; do
rustup target add "$TARGET"
cargo zigbuild --release --target "$TARGET" -p deltachat-rpc-server --features vendored
done

View File

@@ -92,7 +92,7 @@ impl<'a> BlobObject<'a> {
if attempt >= MAX_ATTEMPT {
return Err(err).context("failed to create file");
} else if attempt == 1 && !dir.exists() {
fs::create_dir_all(dir).await.log_err(context).ok();
fs::create_dir_all(dir).await.ok_or_log(context);
} else {
name = format!("{}-{}{}", stem, rand::random::<u32>(), ext);
}

View File

@@ -35,9 +35,8 @@ use crate::scheduler::InterruptInfo;
use crate::smtp::send_msg_to_smtp;
use crate::stock_str;
use crate::tools::{
buf_compress, create_id, create_outgoing_rfc724_mid, create_smeared_timestamp,
create_smeared_timestamps, get_abs_path, gm2local_offset, improve_single_line_input, time,
IsNoneOrEmpty,
create_id, create_outgoing_rfc724_mid, create_smeared_timestamp, create_smeared_timestamps,
get_abs_path, gm2local_offset, improve_single_line_input, time, IsNoneOrEmpty,
};
use crate::webxdc::WEBXDC_SUFFIX;
use crate::{location, sql};
@@ -1581,12 +1580,7 @@ impl Chat {
} else {
msg.param.get(Param::SendHtml).map(|s| s.to_string())
};
match html {
Some(html) => Some(tokio::task::block_in_place(move || {
buf_compress(new_html_mimepart(html).build().as_string().as_bytes())
})?),
None => None,
}
html.map(|html| new_html_mimepart(html).build().as_string())
} else {
None
};
@@ -1600,8 +1594,7 @@ impl Chat {
SET rfc724_mid=?, chat_id=?, from_id=?, to_id=?, timestamp=?, type=?,
state=?, txt=?, subject=?, param=?,
hidden=?, mime_in_reply_to=?, mime_references=?, mime_modified=?,
mime_headers=?, mime_compressed=1, location_id=?, ephemeral_timer=?,
ephemeral_timestamp=?
mime_headers=?, location_id=?, ephemeral_timer=?, ephemeral_timestamp=?
WHERE id=?;",
paramsv![
new_rfc724_mid,
@@ -1647,11 +1640,10 @@ impl Chat {
mime_references,
mime_modified,
mime_headers,
mime_compressed,
location_id,
ephemeral_timer,
ephemeral_timestamp)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,1,?,?,?);",
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);",
paramsv![
new_rfc724_mid,
self.id,

View File

@@ -156,9 +156,7 @@ async fn on_configure_completed(
Some(stock_str::aeap_explanation_and_link(context, &old_addr, &new_addr).await);
chat::add_device_msg(context, None, Some(&mut msg))
.await
.context("Cannot add AEAP explanation")
.log_err(context)
.ok();
.ok_or_log_msg(context, "Cannot add AEAP explanation");
}
}
}

View File

@@ -575,8 +575,7 @@ pub(crate) async fn ephemeral_loop(context: &Context, interrupt_receiver: Receiv
delete_expired_messages(context, time())
.await
.log_err(context)
.ok();
.ok_or_log(context);
}
}

View File

@@ -14,7 +14,7 @@ use std::{
use anyhow::{bail, format_err, Context as _, Result};
use async_channel::Receiver;
use async_imap::types::{Fetch, Flag, Name, NameAttribute, UnsolicitedResponse};
use futures::{StreamExt, TryStreamExt};
use futures::StreamExt;
use num_traits::FromPrimitive;
use crate::chat::{self, ChatId, ChatIdBlocked};
@@ -516,7 +516,8 @@ impl Imap {
.uid_fetch("1:*", RFC724MID_UID)
.await
.with_context(|| format!("can't resync folder {folder}"))?;
while let Some(fetch) = list.try_next().await? {
while let Some(fetch) = list.next().await {
let fetch = fetch?;
let headers = match get_fetch_headers(&fetch) {
Ok(headers) => headers,
Err(err) => {
@@ -659,7 +660,7 @@ impl Imap {
.context("Error fetching UID")?;
let mut new_last_seen_uid = None;
while let Some(fetch) = list.try_next().await? {
while let Some(fetch) = list.next().await.transpose()? {
if fetch.message == mailbox.exists && fetch.uid.is_some() {
new_last_seen_uid = fetch.uid;
}
@@ -1197,11 +1198,8 @@ impl Imap {
.await
.context("failed to fetch flags")?;
while let Some(fetch) = list
.try_next()
.await
.context("failed to get FETCH result")?
{
while let Some(fetch) = list.next().await {
let fetch = fetch.context("failed to get FETCH result")?;
let uid = if let Some(uid) = fetch.uid {
uid
} else {
@@ -1260,7 +1258,8 @@ impl Imap {
.await
.context("IMAP Could not fetch")?;
while let Some(msg) = list.try_next().await? {
while let Some(fetch) = list.next().await {
let msg = fetch?;
match get_fetch_headers(&msg) {
Ok(headers) => {
if let Some(from) = mimeparser::get_from(&headers) {
@@ -1295,7 +1294,8 @@ impl Imap {
.context("IMAP could not fetch")?;
let mut msgs = BTreeMap::new();
while let Some(msg) = list.try_next().await? {
while let Some(fetch) = list.next().await {
let msg = fetch?;
if let Some(msg_uid) = msg.uid {
// If the mailbox is not empty, results always include
// at least one UID, even if last_seen_uid+1 is past
@@ -1332,7 +1332,8 @@ impl Imap {
.context("IMAP Could not fetch")?;
let mut msgs = BTreeMap::new();
while let Some(msg) = list.try_next().await? {
while let Some(fetch) = list.next().await {
let msg = fetch?;
if let Some(msg_uid) = msg.uid {
msgs.insert((msg.internal_date(), msg_uid), msg);
}
@@ -1679,7 +1680,8 @@ impl Imap {
let mut delimiter_is_default = true;
let mut folder_configs = BTreeMap::new();
while let Some(folder) = folders.try_next().await? {
while let Some(folder) = folders.next().await {
let folder = folder?;
info!(context, "Scanning folder: {:?}", folder);
// Update the delimiter iff there is a different one, but only once.

View File

@@ -70,9 +70,7 @@ impl Imap {
loop {
self.fetch_move_delete(context, folder.name(), folder_meaning)
.await
.context("Can't fetch new msgs in scanned folder")
.log_err(context)
.ok();
.ok_or_log_msg(context, "Can't fetch new msgs in scanned folder");
let session = self.session.as_mut().context("no session")?;
// If the server sent an unsocicited EXISTS during the fetch, we need to fetch again
@@ -107,11 +105,7 @@ impl Imap {
let list = session
.list(Some(""), Some("*"))
.await?
.filter_map(|f| async {
f.context("list_folders() can't get folder")
.log_err(context)
.ok()
});
.filter_map(|f| async { f.ok_or_log_msg(context, "list_folders() can't get folder") });
Ok(list.collect().await)
}
}

View File

@@ -91,7 +91,7 @@ pub async fn imex(
let cancel = context.alloc_ongoing().await?;
let res = {
let _guard = context.scheduler.pause(context.clone()).await?;
let _guard = context.scheduler.pause(context.clone()).await;
imex_inner(context, what, path, passphrase)
.race(async {
cancel.recv().await.ok();
@@ -759,7 +759,7 @@ async fn export_database(context: &Context, dest: &Path, passphrase: String) ->
.with_context(|| format!("path {} is not valid unicode", dest.display()))?;
context.sql.set_raw_config_int("backup_time", now).await?;
sql::housekeeping(context).await.log_err(context).ok();
sql::housekeeping(context).await.ok_or_log(context);
context
.sql
.call_write(|conn| {

View File

@@ -47,11 +47,9 @@ use tokio_stream::wrappers::ReadDirStream;
use tokio_util::sync::CancellationToken;
use crate::blob::BlobDirContents;
use crate::chat::{add_device_msg, delete_and_reset_all_device_msgs};
use crate::chat::delete_and_reset_all_device_msgs;
use crate::context::Context;
use crate::message::{Message, Viewtype};
use crate::qr::Qr;
use crate::stock_str::backup_transfer_msg_body;
use crate::{e2ee, EventType};
use super::{export_database, DBFILE_BACKUP_NAME};
@@ -99,7 +97,7 @@ impl BackupProvider {
// Acquire global "ongoing" mutex.
let cancel_token = context.alloc_ongoing().await?;
let paused_guard = context.scheduler.pause(context.clone()).await?;
let paused_guard = context.scheduler.pause(context.clone()).await;
let context_dir = context
.get_blobdir()
.parent()
@@ -272,12 +270,7 @@ impl BackupProvider {
}
};
match &res {
Ok(_) => {
context.emit_event(SendProgress::Completed.into());
let mut msg = Message::new(Viewtype::Text);
msg.text = Some(backup_transfer_msg_body(context).await);
add_device_msg(context, None, Some(&mut msg)).await?;
}
Ok(_) => context.emit_event(SendProgress::Completed.into()),
Err(err) => {
error!(context, "Backup transfer failure: {err:#}");
context.emit_event(SendProgress::Failed.into())
@@ -393,7 +386,7 @@ pub async fn get_backup(context: &Context, qr: Qr) -> Result<()> {
!context.is_configured().await?,
"Cannot import backups to accounts in use."
);
let _guard = context.scheduler.pause(context.clone()).await?;
let _guard = context.scheduler.pause(context.clone()).await;
// Acquire global "ongoing" mutex.
let cancel_token = context.alloc_ongoing().await?;

View File

@@ -5,6 +5,7 @@ use std::time::Duration;
use anyhow::{ensure, Context as _, Result};
use async_channel::Receiver;
use bitflags::bitflags;
use quick_xml::events::{BytesEnd, BytesStart, BytesText};
use tokio::time::timeout;
@@ -77,15 +78,16 @@ pub struct Kml {
pub curr: Location,
}
#[derive(Default, Debug, Clone, PartialEq, Eq)]
enum KmlTag {
#[default]
Undefined,
Placemark,
PlacemarkTimestamp,
PlacemarkTimestampWhen,
PlacemarkPoint,
PlacemarkPointCoordinates,
bitflags! {
#[derive(Default)]
struct KmlTag: i32 {
const UNDEFINED = 0x00;
const PLACEMARK = 0x01;
const TIMESTAMP = 0x02;
const WHEN = 0x04;
const POINT = 0x08;
const COORDINATES = 0x10;
}
}
impl Kml {
@@ -126,14 +128,12 @@ impl Kml {
}
fn text_cb(&mut self, event: &BytesText) {
if self.tag == KmlTag::PlacemarkTimestampWhen
|| self.tag == KmlTag::PlacemarkPointCoordinates
{
if self.tag.contains(KmlTag::WHEN) || self.tag.contains(KmlTag::COORDINATES) {
let val = event.unescape().unwrap_or_default();
let val = val.replace(['\n', '\r', '\t', ' '], "");
if self.tag == KmlTag::PlacemarkTimestampWhen && val.len() >= 19 {
if self.tag.contains(KmlTag::WHEN) && val.len() >= 19 {
// YYYY-MM-DDTHH:MM:SSZ
// 0 4 7 10 13 16 19
match chrono::NaiveDateTime::parse_from_str(&val, "%Y-%m-%dT%H:%M:%SZ") {
@@ -147,7 +147,7 @@ impl Kml {
self.curr.timestamp = time();
}
}
} else if self.tag == KmlTag::PlacemarkPointCoordinates {
} else if self.tag.contains(KmlTag::COORDINATES) {
let parts = val.splitn(2, ',').collect::<Vec<_>>();
if let [longitude, latitude] = &parts[..] {
self.curr.longitude = longitude.parse().unwrap_or_default();
@@ -162,41 +162,17 @@ impl Kml {
.trim()
.to_lowercase();
match self.tag {
KmlTag::PlacemarkTimestampWhen => {
if tag == "when" {
self.tag = KmlTag::PlacemarkTimestamp
}
if tag == "placemark" {
if self.tag.contains(KmlTag::PLACEMARK)
&& 0 != self.curr.timestamp
&& 0. != self.curr.latitude
&& 0. != self.curr.longitude
{
self.locations
.push(std::mem::replace(&mut self.curr, Location::new()));
}
KmlTag::PlacemarkTimestamp => {
if tag == "timestamp" {
self.tag = KmlTag::Placemark
}
}
KmlTag::PlacemarkPointCoordinates => {
if tag == "coordinates" {
self.tag = KmlTag::PlacemarkPoint
}
}
KmlTag::PlacemarkPoint => {
if tag == "point" {
self.tag = KmlTag::Placemark
}
}
KmlTag::Placemark => {
if tag == "placemark" {
if 0 != self.curr.timestamp
&& 0. != self.curr.latitude
&& 0. != self.curr.longitude
{
self.locations
.push(std::mem::replace(&mut self.curr, Location::new()));
}
self.tag = KmlTag::Undefined;
}
}
KmlTag::Undefined => {}
}
self.tag = KmlTag::UNDEFINED;
};
}
fn starttag_cb<B: std::io::BufRead>(
@@ -220,19 +196,19 @@ impl Kml {
.map(|a| a.into_owned());
}
} else if tag == "placemark" {
self.tag = KmlTag::Placemark;
self.tag = KmlTag::PLACEMARK;
self.curr.timestamp = 0;
self.curr.latitude = 0.0;
self.curr.longitude = 0.0;
self.curr.accuracy = 0.0
} else if tag == "timestamp" && self.tag == KmlTag::Placemark {
self.tag = KmlTag::PlacemarkTimestamp;
} else if tag == "when" && self.tag == KmlTag::PlacemarkTimestamp {
self.tag = KmlTag::PlacemarkTimestampWhen;
} else if tag == "point" && self.tag == KmlTag::Placemark {
self.tag = KmlTag::PlacemarkPoint;
} else if tag == "coordinates" && self.tag == KmlTag::PlacemarkPoint {
self.tag = KmlTag::PlacemarkPointCoordinates;
} else if tag == "timestamp" && self.tag.contains(KmlTag::PLACEMARK) {
self.tag = KmlTag::PLACEMARK | KmlTag::TIMESTAMP
} else if tag == "when" && self.tag.contains(KmlTag::TIMESTAMP) {
self.tag = KmlTag::PLACEMARK | KmlTag::TIMESTAMP | KmlTag::WHEN
} else if tag == "point" && self.tag.contains(KmlTag::PLACEMARK) {
self.tag = KmlTag::PLACEMARK | KmlTag::POINT
} else if tag == "coordinates" && self.tag.contains(KmlTag::POINT) {
self.tag = KmlTag::PLACEMARK | KmlTag::POINT | KmlTag::COORDINATES;
if let Some(acc) = event.attributes().find(|attr| {
attr.as_ref()
.map(|a| {

View File

@@ -65,6 +65,9 @@ pub trait LogExt<T, E>
where
Self: std::marker::Sized,
{
#[track_caller]
fn log_err_inner(self, context: &Context, msg: Option<&str>) -> Result<T, E>;
/// Emits a warning if the receiver contains an Err value.
///
/// Thanks to the [track_caller](https://blog.rust-lang.org/2020/08/27/Rust-1.46.0.html#track_caller)
@@ -73,23 +76,73 @@ where
/// Unfortunately, the track_caller feature does not work on async functions (as of Rust 1.50).
/// Once it is, you can add `#[track_caller]` to helper functions that use one of the log helpers here
/// so that the location of the caller can be seen in the log. (this won't work with the macros,
/// like warn!(), since the file!() and line!() macros don't work with track_caller)
/// like warn!(), since the file!() and line!() macros don't work with track_caller)
/// See <https://github.com/rust-lang/rust/issues/78840> for progress on this.
#[track_caller]
fn log_err(self, context: &Context) -> Result<T, E>;
fn log_err(self, context: &Context, msg: &str) -> Result<T, E> {
self.log_err_inner(context, Some(msg))
}
/// Emits a warning if the receiver contains an Err value and returns an [`Option<T>`].
///
/// Example:
/// ```text
/// if let Err(e) = do_something() {
/// warn!(context, "{:#}", e);
/// }
/// ```
/// is equivalent to:
/// ```text
/// do_something().ok_or_log(context);
/// ```
///
/// For a note on the `track_caller` feature, see the doc comment on `log_err()`.
#[track_caller]
fn ok_or_log(self, context: &Context) -> Option<T> {
self.log_err_inner(context, None).ok()
}
/// Like `ok_or_log()`, but you can pass an extra message that is prepended in the log.
///
/// Example:
/// ```text
/// if let Err(e) = do_something() {
/// warn!(context, "Something went wrong: {:#}", e);
/// }
/// ```
/// is equivalent to:
/// ```text
/// do_something().ok_or_log_msg(context, "Something went wrong");
/// ```
/// and is also equivalent to:
/// ```text
/// use anyhow::Context as _;
/// do_something().context("Something went wrong").ok_or_log(context);
/// ```
///
/// For a note on the `track_caller` feature, see the doc comment on `log_err()`.
#[track_caller]
fn ok_or_log_msg(self, context: &Context, msg: &'static str) -> Option<T> {
self.log_err_inner(context, Some(msg)).ok()
}
}
impl<T, E: std::fmt::Display> LogExt<T, E> for Result<T, E> {
#[track_caller]
fn log_err(self, context: &Context) -> Result<T, E> {
fn log_err_inner(self, context: &Context, msg: Option<&str>) -> Result<T, E> {
if let Err(e) = &self {
let location = std::panic::Location::caller();
let separator = if msg.is_none() { "" } else { ": " };
let msg = msg.unwrap_or_default();
// We are using Anyhow's .context() and to show the inner error, too, we need the {:#}:
let full = format!(
"{file}:{line}: {e:#}",
"{file}:{line}: {msg}{separator}{e:#}",
file = location.file(),
line = location.line(),
msg = msg,
separator = separator,
e = e
);
// We can't use the warn!() macro here as the file!() and line!() macros

View File

@@ -5,6 +5,7 @@ use std::path::{Path, PathBuf};
use anyhow::{ensure, format_err, Context as _, Result};
use deltachat_derive::{FromSql, ToSql};
use rusqlite::types::ValueRef;
use serde::{Deserialize, Serialize};
use crate::chat::{self, Chat, ChatId};
@@ -28,8 +29,8 @@ use crate::sql;
use crate::stock_str;
use crate::summary::Summary;
use crate::tools::{
buf_compress, buf_decompress, create_smeared_timestamp, get_filebytes, get_filemeta,
gm2local_offset, read_file, time, timestamp_to_str, truncate,
create_smeared_timestamp, get_filebytes, get_filemeta, gm2local_offset, read_file, time,
timestamp_to_str, truncate,
};
/// Message ID, including reserved IDs.
@@ -1349,52 +1350,21 @@ pub(crate) fn guess_msgtype_from_suffix(path: &Path) -> Option<(Viewtype, &str)>
/// e.g. because of save_mime_headers is not set
/// or the message is not incoming.
pub async fn get_mime_headers(context: &Context, msg_id: MsgId) -> Result<Vec<u8>> {
let (headers, compressed) = context
let headers = context
.sql
.query_row(
"SELECT mime_headers, mime_compressed FROM msgs WHERE id=?",
"SELECT mime_headers FROM msgs WHERE id=?;",
paramsv![msg_id],
|row| {
let headers = sql::row_get_vec(row, 0)?;
let compressed: bool = row.get(1)?;
Ok((headers, compressed))
row.get(0).or_else(|err| match row.get_ref(0)? {
ValueRef::Null => Ok(Vec::new()),
ValueRef::Text(text) => Ok(text.to_vec()),
ValueRef::Blob(blob) => Ok(blob.to_vec()),
ValueRef::Integer(_) | ValueRef::Real(_) => Err(err),
})
},
)
.await?;
if compressed {
return buf_decompress(&headers);
}
let headers2 = headers.clone();
let compressed = match tokio::task::block_in_place(move || buf_compress(&headers2)) {
Err(e) => {
warn!(context, "get_mime_headers: buf_compress() failed: {}", e);
return Ok(headers);
}
Ok(o) => o,
};
let update = |conn: &mut rusqlite::Connection| {
match conn.execute(
"\
UPDATE msgs SET mime_headers=?, mime_compressed=1 \
WHERE id=? AND mime_headers!='' AND mime_compressed=0",
params![compressed, msg_id],
) {
Ok(rows_updated) => ensure!(rows_updated <= 1),
Err(e) => {
warn!(context, "get_mime_headers: UPDATE failed: {}", e);
return Err(e.into());
}
}
Ok(())
};
if let Err(e) = context.sql.call_write(update).await {
warn!(
context,
"get_mime_headers: failed to update mime_headers: {}", e
);
}
Ok(headers)
}
@@ -1443,6 +1413,8 @@ pub async fn delete_msgs(context: &Context, msg_ids: &[MsgId]) -> Result<()> {
context.emit_msgs_changed_without_ids();
// Run housekeeping to delete unused blobs.
// We need to use set_raw_config() here since with set_config() it
// wouldn't compile ("recursion in an `async fn`")
context.set_config(Config::LastHousekeeping, None).await?;
}

View File

@@ -27,13 +27,16 @@ use crate::simplify::escape_message_footer_marks;
use crate::stock_str;
use crate::tools::IsNoneOrEmpty;
use crate::tools::{
create_outgoing_rfc724_mid, create_smeared_timestamp, remove_subject_prefix, time,
create_outgoing_rfc724_mid, create_smeared_timestamp, get_filebytes, remove_subject_prefix,
time,
};
// Attachments of 25 MB gross ("brutto") should work on the majority of providers
// (gross limits: web.de=50, 1&1=40, t-online.de=32, gmail=25, posteo=50, yahoo=25, all-inkl=100).
// As an upper limit, we double that size; the core won't send messages larger than this.
// To get the net ("netto") sizes, we subtract 1 MB of header overhead and the base64 overhead.
pub const RECOMMENDED_FILE_SIZE: u64 = 24 * 1024 * 1024 / 4 * 3;
const UPPER_LIMIT_FILE_SIZE: u64 = 49 * 1024 * 1024 / 4 * 3;
#[derive(Debug, Clone)]
pub enum Loaded {
@@ -1208,8 +1211,15 @@ impl<'a> MimeFactory<'a> {
// add attachment part
if self.msg.viewtype.has_file() {
let (file_part, _) = build_body_file(context, self.msg, "").await?;
parts.push(file_part);
if !is_file_size_okay(context, self.msg).await? {
bail!(
"Message exceeds the recommended {} MB.",
RECOMMENDED_FILE_SIZE / 1_000_000,
);
} else {
let (file_part, _) = build_body_file(context, self.msg, "").await?;
parts.push(file_part);
}
}
if let Some(meta_part) = meta_part {
@@ -1473,6 +1483,16 @@ fn recipients_contain_addr(recipients: &[(String, String)], addr: &str) -> bool
.any(|(_, cur)| cur.to_lowercase() == addr_lc)
}
/// Returns whether the message's attached file is within the hard upper size limit.
///
/// Returns `Ok(false)` when the message carries no file at all.
async fn is_file_size_okay(context: &Context, msg: &Message) -> Result<bool> {
    if let Some(path) = msg.param.get_path(Param::File, context)? {
        let bytes = get_filebytes(context, &path).await?;
        Ok(bytes <= UPPER_LIMIT_FILE_SIZE)
    } else {
        Ok(false)
    }
}
fn render_rfc724_mid(rfc724_mid: &str) -> String {
let rfc724_mid = rfc724_mid.trim().to_string();

View File

@@ -1283,7 +1283,7 @@ impl MimeMessage {
if !key.details.users.iter().any(|user| {
user.id
.id()
.ends_with((String::from("<") + &peerstate.addr + ">").as_bytes())
.ends_with(&(String::from("<") + &peerstate.addr + ">"))
}) {
return Ok(false);
}

View File

@@ -10,8 +10,7 @@ use pgp::composed::{
Deserializable, KeyType as PgpKeyType, Message, SecretKeyParamsBuilder, SignedPublicKey,
SignedPublicSubKey, SignedSecretKey, StandaloneSignature, SubkeyParamsBuilder,
};
use pgp::crypto::hash::HashAlgorithm;
use pgp::crypto::sym::SymmetricKeyAlgorithm;
use pgp::crypto::{HashAlgorithm, SymmetricKeyAlgorithm};
use pgp::types::{
CompressionAlgorithm, KeyTrait, Mpi, PublicKeyTrait, SecretKeyTrait, StringToKey,
};
@@ -51,7 +50,7 @@ impl<'a> KeyTrait for SignedPublicKeyOrSubkey<'a> {
}
}
fn algorithm(&self) -> pgp::crypto::public_key::PublicKeyAlgorithm {
fn algorithm(&self) -> pgp::crypto::PublicKeyAlgorithm {
match self {
Self::Key(k) => k.algorithm(),
Self::Subkey(k) => k.algorithm(),
@@ -298,7 +297,7 @@ pub fn pk_decrypt(
let skeys: Vec<&SignedSecretKey> = private_keys_for_decryption.keys().iter().collect();
let (decryptor, _) = msg.decrypt(|| "".into(), &skeys[..])?;
let (decryptor, _) = msg.decrypt(|| "".into(), || "".into(), &skeys[..])?;
let msgs = decryptor.collect::<pgp::errors::Result<Vec<_>>>()?;
if let Some(msg) = msgs.into_iter().next() {

View File

@@ -37,7 +37,7 @@ use crate::reaction::{set_msg_reaction, Reaction};
use crate::securejoin::{self, handle_securejoin_handshake, observe_securejoin_on_other_device};
use crate::sql;
use crate::stock_str;
use crate::tools::{buf_compress, extract_grpid_from_rfc724_mid, smeared_time};
use crate::tools::{extract_grpid_from_rfc724_mid, smeared_time};
use crate::{contact, imap};
/// This is the struct that is returned after receiving one email (aka MIME message).
@@ -691,8 +691,7 @@ async fn add_parts(
} else if allow_creation {
if let Ok(chat) = ChatIdBlocked::get_for_contact(context, from_id, create_blocked)
.await
.context("Failed to get (new) chat for contact")
.log_err(context)
.log_err(context, "Failed to get (new) chat for contact")
{
chat_id = Some(chat.id);
chat_id_blocked = chat.blocked;
@@ -844,8 +843,7 @@ async fn add_parts(
// maybe an Autocrypt Setup Message
if let Ok(chat) = ChatIdBlocked::get_for_contact(context, ContactId::SELF, Blocked::Not)
.await
.context("Failed to get (new) chat for contact")
.log_err(context)
.log_err(context, "Failed to get (new) chat for contact")
{
chat_id = Some(chat.id);
chat_id_blocked = chat.blocked;
@@ -1065,12 +1063,11 @@ async fn add_parts(
let mut save_mime_modified = mime_parser.is_mime_modified;
let mime_headers = if save_mime_headers || save_mime_modified {
let headers = if mime_parser.was_encrypted() && !mime_parser.decoded_data.is_empty() {
if mime_parser.was_encrypted() && !mime_parser.decoded_data.is_empty() {
mime_parser.decoded_data.clone()
} else {
imf_raw.to_vec()
};
tokio::task::block_in_place(move || buf_compress(&headers))?
}
} else {
Vec::new()
};
@@ -1153,7 +1150,7 @@ INSERT INTO msgs
from_id, to_id, timestamp, timestamp_sent,
timestamp_rcvd, type, state, msgrmsg,
txt, subject, txt_raw, param,
bytes, mime_headers, mime_compressed, mime_in_reply_to,
bytes, mime_headers, mime_in_reply_to,
mime_references, mime_modified, error, ephemeral_timer,
ephemeral_timestamp, download_state, hop_info
)
@@ -1162,7 +1159,7 @@ INSERT INTO msgs
?, ?, ?, ?,
?, ?, ?, ?,
?, ?, ?, ?,
?, ?, ?, ?, 1,
?, ?, ?, ?,
?, ?, ?, ?,
?, ?, ?, ?
)
@@ -1171,8 +1168,7 @@ SET rfc724_mid=excluded.rfc724_mid, chat_id=excluded.chat_id,
from_id=excluded.from_id, to_id=excluded.to_id, timestamp=excluded.timestamp, timestamp_sent=excluded.timestamp_sent,
timestamp_rcvd=excluded.timestamp_rcvd, type=excluded.type, state=excluded.state, msgrmsg=excluded.msgrmsg,
txt=excluded.txt, subject=excluded.subject, txt_raw=excluded.txt_raw, param=excluded.param,
bytes=excluded.bytes, mime_headers=excluded.mime_headers,
mime_compressed=excluded.mime_compressed, mime_in_reply_to=excluded.mime_in_reply_to,
bytes=excluded.bytes, mime_headers=excluded.mime_headers, mime_in_reply_to=excluded.mime_in_reply_to,
mime_references=excluded.mime_references, mime_modified=excluded.mime_modified, error=excluded.error, ephemeral_timer=excluded.ephemeral_timer,
ephemeral_timestamp=excluded.ephemeral_timestamp, download_state=excluded.download_state, hop_info=excluded.hop_info
"#)?;

View File

@@ -1,8 +1,7 @@
use std::iter::{self, once};
use std::num::NonZeroUsize;
use std::sync::atomic::Ordering;
use anyhow::{bail, Context as _, Error, Result};
use anyhow::{bail, Context as _, Result};
use async_channel::{self as channel, Receiver, Sender};
use futures::future::try_join_all;
use futures_lite::FutureExt;
@@ -44,18 +43,15 @@ impl SchedulerState {
/// Whether the scheduler is currently running.
pub(crate) async fn is_running(&self) -> bool {
let inner = self.inner.read().await;
matches!(*inner, InnerSchedulerState::Started(_))
inner.scheduler.is_some()
}
/// Starts the scheduler if it is not yet started.
pub(crate) async fn start(&self, context: Context) {
let mut inner = self.inner.write().await;
match *inner {
InnerSchedulerState::Started(_) => (),
InnerSchedulerState::Stopped => Self::do_start(inner, context).await,
InnerSchedulerState::Paused {
ref mut started, ..
} => *started = true,
inner.started = true;
if inner.scheduler.is_none() && !inner.paused {
Self::do_start(inner, context).await;
}
}
@@ -64,7 +60,7 @@ impl SchedulerState {
info!(context, "starting IO");
let ctx = context.clone();
match Scheduler::start(context).await {
Ok(scheduler) => *inner = InnerSchedulerState::Started(scheduler),
Ok(scheduler) => inner.scheduler = Some(scheduler),
Err(err) => error!(&ctx, "Failed to start IO: {:#}", err),
}
}
@@ -72,23 +68,12 @@ impl SchedulerState {
/// Stops the scheduler if it is currently running.
pub(crate) async fn stop(&self, context: &Context) {
let mut inner = self.inner.write().await;
match *inner {
InnerSchedulerState::Started(_) => {
Self::do_stop(inner, context, InnerSchedulerState::Stopped).await
}
InnerSchedulerState::Stopped => (),
InnerSchedulerState::Paused {
ref mut started, ..
} => *started = false,
}
inner.started = false;
Self::do_stop(inner, context).await;
}
/// Stops the scheduler if it is currently running.
async fn do_stop(
mut inner: RwLockWriteGuard<'_, InnerSchedulerState>,
context: &Context,
new_state: InnerSchedulerState,
) {
async fn do_stop(mut inner: RwLockWriteGuard<'_, InnerSchedulerState>, context: &Context) {
// Sending an event wakes up event pollers (get_next_event)
// so the caller of stop_io() can arrange for proper termination.
// For this, the caller needs to instruct the event poller
@@ -98,10 +83,8 @@ impl SchedulerState {
if let Some(debug_logging) = context.debug_logging.read().await.as_ref() {
debug_logging.loop_handle.abort();
}
let prev_state = std::mem::replace(&mut *inner, new_state);
match prev_state {
InnerSchedulerState::Started(scheduler) => scheduler.stop(context).await,
InnerSchedulerState::Stopped | InnerSchedulerState::Paused { .. } => (),
if let Some(scheduler) = inner.scheduler.take() {
scheduler.stop(context).await;
}
}
@@ -113,63 +96,22 @@ impl SchedulerState {
/// If in the meantime [`SchedulerState::start`] or [`SchedulerState::stop`] is called
/// resume will do the right thing and restore the scheduler to the state requested by
/// the last call.
pub(crate) async fn pause<'a>(&'_ self, context: Context) -> Result<IoPausedGuard> {
pub(crate) async fn pause<'a>(&'_ self, context: Context) -> IoPausedGuard {
{
let mut inner = self.inner.write().await;
match *inner {
InnerSchedulerState::Started(_) => {
let new_state = InnerSchedulerState::Paused {
started: true,
pause_guards_count: NonZeroUsize::new(1).unwrap(),
};
Self::do_stop(inner, &context, new_state).await;
}
InnerSchedulerState::Stopped => {
*inner = InnerSchedulerState::Paused {
started: false,
pause_guards_count: NonZeroUsize::new(1).unwrap(),
};
}
InnerSchedulerState::Paused {
ref mut pause_guards_count,
..
} => {
*pause_guards_count = pause_guards_count
.checked_add(1)
.ok_or_else(|| Error::msg("Too many pause guards active"))?
}
}
inner.paused = true;
Self::do_stop(inner, &context).await;
}
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
rx.await.ok();
let mut inner = context.scheduler.inner.write().await;
match *inner {
InnerSchedulerState::Started(_) => {
warn!(&context, "IoPausedGuard resume: started instead of paused");
}
InnerSchedulerState::Stopped => {
warn!(&context, "IoPausedGuard resume: stopped instead of paused");
}
InnerSchedulerState::Paused {
ref started,
ref mut pause_guards_count,
} => {
if *pause_guards_count == NonZeroUsize::new(1).unwrap() {
match *started {
true => SchedulerState::do_start(inner, context.clone()).await,
false => *inner = InnerSchedulerState::Stopped,
}
} else {
let new_count = pause_guards_count.get() - 1;
// SAFETY: Value was >=2 before due to if condition
*pause_guards_count = NonZeroUsize::new(new_count).unwrap();
}
}
inner.paused = false;
if inner.started && inner.scheduler.is_none() {
SchedulerState::do_start(inner, context.clone()).await;
}
});
Ok(IoPausedGuard { sender: Some(tx) })
IoPausedGuard { sender: Some(tx) }
}
/// Restarts the scheduler, only if it is running.
@@ -184,8 +126,8 @@ impl SchedulerState {
/// Indicate that the network likely has come back.
pub(crate) async fn maybe_network(&self) {
let inner = self.inner.read().await;
let (inbox, oboxes) = match *inner {
InnerSchedulerState::Started(ref scheduler) => {
let (inbox, oboxes) = match inner.scheduler {
Some(ref scheduler) => {
scheduler.maybe_network();
let inbox = scheduler.inbox.conn_state.state.connectivity.clone();
let oboxes = scheduler
@@ -195,7 +137,7 @@ impl SchedulerState {
.collect::<Vec<_>>();
(inbox, oboxes)
}
_ => return,
None => return,
};
drop(inner);
connectivity::idle_interrupted(inbox, oboxes).await;
@@ -204,15 +146,15 @@ impl SchedulerState {
/// Indicate that the network likely is lost.
pub(crate) async fn maybe_network_lost(&self, context: &Context) {
let inner = self.inner.read().await;
let stores = match *inner {
InnerSchedulerState::Started(ref scheduler) => {
let stores = match inner.scheduler {
Some(ref scheduler) => {
scheduler.maybe_network_lost();
scheduler
.boxes()
.map(|b| b.conn_state.state.connectivity.clone())
.collect()
}
_ => return,
None => return,
};
drop(inner);
connectivity::maybe_network_lost(context, stores).await;
@@ -220,49 +162,45 @@ impl SchedulerState {
pub(crate) async fn interrupt_inbox(&self, info: InterruptInfo) {
let inner = self.inner.read().await;
if let InnerSchedulerState::Started(ref scheduler) = *inner {
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_inbox(info);
}
}
pub(crate) async fn interrupt_smtp(&self, info: InterruptInfo) {
let inner = self.inner.read().await;
if let InnerSchedulerState::Started(ref scheduler) = *inner {
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_smtp(info);
}
}
pub(crate) async fn interrupt_ephemeral_task(&self) {
let inner = self.inner.read().await;
if let InnerSchedulerState::Started(ref scheduler) = *inner {
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_ephemeral_task();
}
}
pub(crate) async fn interrupt_location(&self) {
let inner = self.inner.read().await;
if let InnerSchedulerState::Started(ref scheduler) = *inner {
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_location();
}
}
pub(crate) async fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
let inner = self.inner.read().await;
if let InnerSchedulerState::Started(ref scheduler) = *inner {
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_recently_seen(contact_id, timestamp);
}
}
}
#[derive(Debug, Default)]
enum InnerSchedulerState {
Started(Scheduler),
#[default]
Stopped,
Paused {
started: bool,
pause_guards_count: NonZeroUsize,
},
struct InnerSchedulerState {
scheduler: Option<Scheduler>,
started: bool,
paused: bool,
}
/// Guard to make sure the IO Scheduler is resumed.
@@ -363,7 +301,7 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
let next_housekeeping_time =
last_housekeeping_time.saturating_add(60 * 60 * 24);
if next_housekeeping_time <= time() {
sql::housekeeping(&ctx).await.log_err(&ctx).ok();
sql::housekeeping(&ctx).await.ok_or_log(&ctx);
}
}
Err(err) => {
@@ -472,8 +410,7 @@ async fn fetch_idle(
.store_seen_flags_on_imap(ctx)
.await
.context("store_seen_flags_on_imap")
.log_err(ctx)
.ok();
.ok_or_log(ctx);
} else {
warn!(ctx, "No session even though we just prepared it");
}
@@ -497,8 +434,7 @@ async fn fetch_idle(
delete_expired_imap_messages(ctx)
.await
.context("delete_expired_imap_messages")
.log_err(ctx)
.ok();
.ok_or_log(ctx);
// Scan additional folders only after finishing fetching the watched folder.
//
@@ -538,8 +474,7 @@ async fn fetch_idle(
.sync_seen_flags(ctx, &watch_folder)
.await
.context("sync_seen_flags")
.log_err(ctx)
.ok();
.ok_or_log(ctx);
connection.connectivity.set_connected(ctx).await;
@@ -835,22 +770,20 @@ impl Scheduler {
pub(crate) async fn stop(self, context: &Context) {
// Send stop signals to tasks so they can shutdown cleanly.
for b in self.boxes() {
b.conn_state.stop().await.log_err(context).ok();
b.conn_state.stop().await.ok_or_log(context);
}
self.smtp.stop().await.log_err(context).ok();
self.smtp.stop().await.ok_or_log(context);
// Actually shutdown tasks.
let timeout_duration = std::time::Duration::from_secs(30);
for b in once(self.inbox).chain(self.oboxes.into_iter()) {
tokio::time::timeout(timeout_duration, b.handle)
.await
.log_err(context)
.ok();
.ok_or_log(context);
}
tokio::time::timeout(timeout_duration, self.smtp_handle)
.await
.log_err(context)
.ok();
.ok_or_log(context);
self.ephemeral_handle.abort();
self.location_handle.abort();
self.recently_seen_loop.abort();

View File

@@ -14,8 +14,6 @@ use crate::tools::time;
use crate::{context::Context, log::LogExt};
use crate::{stock_str, tools};
use super::InnerSchedulerState;
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumProperty, PartialOrd, Ord)]
pub enum Connectivity {
NotConnected = 1000,
@@ -228,12 +226,12 @@ impl Context {
/// If the connectivity changes, a DC_EVENT_CONNECTIVITY_CHANGED will be emitted.
pub async fn get_connectivity(&self) -> Connectivity {
let lock = self.scheduler.inner.read().await;
let stores: Vec<_> = match *lock {
InnerSchedulerState::Started(ref sched) => sched
let stores: Vec<_> = match lock.scheduler {
Some(ref sched) => sched
.boxes()
.map(|b| b.conn_state.state.connectivity.clone())
.collect(),
_ => return Connectivity::NotConnected,
None => return Connectivity::NotConnected,
};
drop(lock);
@@ -311,15 +309,15 @@ impl Context {
// =============================================================================================
let lock = self.scheduler.inner.read().await;
let (folders_states, smtp) = match *lock {
InnerSchedulerState::Started(ref sched) => (
let (folders_states, smtp) = match lock.scheduler {
Some(ref sched) => (
sched
.boxes()
.map(|b| (b.meaning, b.conn_state.state.connectivity.clone()))
.collect::<Vec<_>>(),
sched.smtp.state.connectivity.clone(),
),
_ => {
None => {
return Err(anyhow!("Not started"));
}
};
@@ -339,7 +337,7 @@ impl Context {
let mut folder_added = false;
if let Some(config) = folder.to_config().filter(|c| watched_folders.contains(c)) {
let f = self.get_config(config).await.log_err(self).ok().flatten();
let f = self.get_config(config).await.ok_or_log(self).flatten();
if let Some(foldername) = f {
let detailed = &state.get_detailed().await;
@@ -398,72 +396,64 @@ impl Context {
if let Some(quota) = &*quota {
match &quota.recent {
Ok(quota) => {
if !quota.is_empty() {
for (root_name, resources) in quota {
use async_imap::types::QuotaResourceName::*;
for resource in resources {
ret += "<li>";
let roots_cnt = quota.len();
for (root_name, resources) in quota {
use async_imap::types::QuotaResourceName::*;
for resource in resources {
ret += "<li>";
// root name is empty eg. for gmail and redundant eg. for riseup.
// therefore, use it only if there are really several roots.
if quota.len() > 1 && !root_name.is_empty() {
ret += &format!(
"<b>{}:</b> ",
&*escaper::encode_minimal(root_name)
);
} else {
info!(
self,
"connectivity: root name hidden: \"{}\"", root_name
);
}
let messages = stock_str::messages(self).await;
let part_of_total_used = stock_str::part_of_total_used(
self,
&resource.usage.to_string(),
&resource.limit.to_string(),
)
.await;
ret += &match &resource.name {
Atom(resource_name) => {
format!(
"<b>{}:</b> {}",
&*escaper::encode_minimal(resource_name),
part_of_total_used
)
}
Message => {
format!("<b>{part_of_total_used}:</b> {messages}")
}
Storage => {
// do not use a special title needed for "Storage":
// - it is usually shown directly under the "Storage" headline
// - by the units "1 MB of 10 MB used" there is some difference to eg. "Messages: 1 of 10 used"
// - the string is not longer than the other strings that way (minus title, plus units) -
// additional linebreaks on small displays are unlikely therefore
// - most times, this is the only item anyway
let usage = &format_size(resource.usage * 1024, BINARY);
let limit = &format_size(resource.limit * 1024, BINARY);
stock_str::part_of_total_used(self, usage, limit).await
}
};
let percent = resource.get_usage_percentage();
let color = if percent >= QUOTA_ERROR_THRESHOLD_PERCENTAGE {
"red"
} else if percent >= QUOTA_WARN_THRESHOLD_PERCENTAGE {
"yellow"
} else {
"green"
};
ret += &format!("<div class=\"bar\"><div class=\"progress {color}\" style=\"width: {percent}%\">{percent}%</div></div>");
ret += "</li>";
// root name is empty eg. for gmail and redundant eg. for riseup.
// therefore, use it only if there are really several roots.
if roots_cnt > 1 && !root_name.is_empty() {
ret +=
&format!("<b>{}:</b> ", &*escaper::encode_minimal(root_name));
} else {
info!(self, "connectivity: root name hidden: \"{}\"", root_name);
}
let messages = stock_str::messages(self).await;
let part_of_total_used = stock_str::part_of_total_used(
self,
&resource.usage.to_string(),
&resource.limit.to_string(),
)
.await;
ret += &match &resource.name {
Atom(resource_name) => {
format!(
"<b>{}:</b> {}",
&*escaper::encode_minimal(resource_name),
part_of_total_used
)
}
Message => {
format!("<b>{part_of_total_used}:</b> {messages}")
}
Storage => {
// do not use a special title needed for "Storage":
// - it is usually shown directly under the "Storage" headline
// - by the units "1 MB of 10 MB used" there is some difference to eg. "Messages: 1 of 10 used"
// - the string is not longer than the other strings that way (minus title, plus units) -
// additional linebreaks on small displays are unlikely therefore
// - most times, this is the only item anyway
let usage = &format_size(resource.usage * 1024, BINARY);
let limit = &format_size(resource.limit * 1024, BINARY);
stock_str::part_of_total_used(self, usage, limit).await
}
};
let percent = resource.get_usage_percentage();
let color = if percent >= QUOTA_ERROR_THRESHOLD_PERCENTAGE {
"red"
} else if percent >= QUOTA_WARN_THRESHOLD_PERCENTAGE {
"yellow"
} else {
"green"
};
ret += &format!("<div class=\"bar\"><div class=\"progress {color}\" style=\"width: {percent}%\">{percent}%</div></div>");
ret += "</li>";
}
} else {
ret += format!("<li>Warning: {domain} claims to support quota but gives no information</li>").as_str();
}
}
Err(e) => {
@@ -490,14 +480,14 @@ impl Context {
/// Returns true if all background work is done.
pub async fn all_work_done(&self) -> bool {
let lock = self.scheduler.inner.read().await;
let stores: Vec<_> = match *lock {
InnerSchedulerState::Started(ref sched) => sched
let stores: Vec<_> = match lock.scheduler {
Some(ref sched) => sched
.boxes()
.map(|b| &b.conn_state.state)
.chain(once(&sched.smtp.state))
.map(|state| state.connectivity.clone())
.collect(),
_ => return false,
None => return false,
};
drop(lock);

View File

@@ -5,7 +5,7 @@ use std::convert::TryFrom;
use std::path::{Path, PathBuf};
use anyhow::{bail, Context as _, Result};
use rusqlite::{self, config::DbConfig, types::ValueRef, Connection, OpenFlags, Row};
use rusqlite::{self, config::DbConfig, Connection, OpenFlags};
use tokio::sync::{Mutex, MutexGuard, RwLock};
use crate::blob::BlobObject;
@@ -57,7 +57,7 @@ pub struct Sql {
/// Database file path
pub(crate) dbfile: PathBuf,
/// Write transactions mutex.
/// Write transaction mutex.
///
/// See [`Self::write_lock`].
write_mtx: Mutex<()>,
@@ -696,15 +696,6 @@ fn new_connection(path: &Path, passphrase: &str) -> Result<Connection> {
/// Cleanup the account to restore some storage and optimize the database.
pub async fn housekeeping(context: &Context) -> Result<()> {
// Setting `Config::LastHousekeeping` at the beginning avoids endless loops when things do not
// work out for whatever reason or are interrupted by the OS.
if let Err(e) = context
.set_config(Config::LastHousekeeping, Some(&time().to_string()))
.await
{
warn!(context, "Can't set config: {e:#}.");
}
if let Err(err) = remove_unused_files(context).await {
warn!(
context,
@@ -752,6 +743,13 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
}
}
if let Err(e) = context
.set_config(Config::LastHousekeeping, Some(&time().to_string()))
.await
{
warn!(context, "Can't set config: {e:#}.");
}
context
.sql
.execute(
@@ -759,24 +757,12 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
(),
)
.await
.context("failed to remove old MDNs")
.log_err(context)
.ok();
.ok_or_log_msg(context, "failed to remove old MDNs");
info!(context, "Housekeeping done.");
Ok(())
}
/// Returns the value of column `idx` of `row` as a `Vec<u8>`.
///
/// NULL decodes to an empty vector and TEXT/BLOB values are copied out;
/// numeric columns (INTEGER/REAL) yield the original conversion error.
pub fn row_get_vec(row: &Row, idx: usize) -> rusqlite::Result<Vec<u8>> {
    match row.get(idx) {
        Ok(bytes) => Ok(bytes),
        Err(err) => match row.get_ref(idx)? {
            ValueRef::Null => Ok(Vec::new()),
            ValueRef::Text(text) => Ok(text.to_vec()),
            ValueRef::Blob(blob) => Ok(blob.to_vec()),
            ValueRef::Integer(_) | ValueRef::Real(_) => Err(err),
        },
    }
}
/// Enumerates used files in the blobdir and removes unused ones.
pub async fn remove_unused_files(context: &Context) -> Result<()> {
let mut files_in_use = HashSet::new();
@@ -909,14 +895,12 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
}
}
Err(err) => {
if !p.ends_with(BLOBS_BACKUP_NAME) {
warn!(
context,
"Housekeeping: Cannot read dir {}: {:#}.",
p.display(),
err
);
}
warn!(
context,
"Housekeeping: Cannot read dir {}: {:#}.",
p.display(),
err
);
}
}
}

View File

@@ -704,13 +704,6 @@ CREATE INDEX smtp_messageid ON imap(rfc724_mid);
// Reverted above, as it requires to load the whole DB in memory.
sql.set_db_version(99).await?;
}
if dbversion < 100 {
sql.execute_migration(
"ALTER TABLE msgs ADD COLUMN mime_compressed INTEGER NOT NULL DEFAULT 0",
100,
)
.await?;
}
let new_version = sql
.get_raw_config_int(VERSION_CFG)
@@ -742,18 +735,14 @@ impl Sql {
Ok(())
}
/// Sets the db `version` inside the given `transaction`.
fn set_db_version_trans(transaction: &mut rusqlite::Transaction, version: i32) -> Result<()> {
    let value = format!("{version}");
    transaction.execute(
        "UPDATE config SET value=? WHERE keyname=?;",
        params![value, VERSION_CFG],
    )?;
    Ok(())
}
async fn execute_migration(&self, query: &str, version: i32) -> Result<()> {
async fn execute_migration(&self, query: &'static str, version: i32) -> Result<()> {
self.transaction(move |transaction| {
Self::set_db_version_trans(transaction, version)?;
// set raw config inside the transaction
transaction.execute(
"UPDATE config SET value=? WHERE keyname=?;",
paramsv![format!("{version}"), VERSION_CFG],
)?;
transaction.execute_batch(query)?;
Ok(())

View File

@@ -407,9 +407,6 @@ pub enum StockMessage {
#[strum(props(fallback = "Scan to set up second device for %1$s"))]
BackupTransferQr = 162,
#[strum(props(fallback = " Account transferred to your second device."))]
BackupTransferMsgBody = 163,
}
impl StockMessage {
@@ -1264,10 +1261,6 @@ pub(crate) async fn backup_transfer_qr(context: &Context) -> Result<String> {
.replace1(&full_name))
}
pub(crate) async fn backup_transfer_msg_body(context: &Context) -> String {
translated(context, StockMessage::BackupTransferMsgBody).await
}
impl Context {
/// Set the stock string for the [StockMessage].
///

View File

@@ -5,8 +5,7 @@
use std::borrow::Cow;
use std::fmt;
use std::io::{Cursor, Write};
use std::mem;
use std::io::Cursor;
use std::path::{Path, PathBuf};
use std::str::from_utf8;
use std::time::{Duration, SystemTime};
@@ -655,42 +654,6 @@ pub(crate) fn single_value<T>(collection: impl IntoIterator<Item = T>) -> Option
None
}
/// Compressor/decompressor buffer size.
const BROTLI_BUFSZ: usize = 4096;
/// Compresses `buf` into a `Vec` using `brotli`.
///
/// An empty `buf` is special-cased to remain empty after compression:
/// brotli would otherwise add its metadata to it, which is undesirable because
/// this function compresses strings stored in the db and empty strings are
/// common there. Strictly speaking the brotli documentation does not promise
/// that a non-empty input never compresses to an empty buffer, but in practice
/// this will never break.
pub(crate) fn buf_compress(buf: &[u8]) -> Result<Vec<u8>> {
    if buf.is_empty() {
        return Ok(Vec::new());
    }
    // Choose the quality adaptively: level 4 is 2x faster than level 6 (and
    // 54x faster than 10, for comparison). This avoids slowing down the
    // processing of single large files too much, esp. on low-budget devices;
    // in tests (see #4129) it makes a difference without compressing much worse.
    let quality: u32 = if buf.len() > 1_000_000 { 4 } else { 6 };
    // log2 of the LZ77 window size; 22 is the default of the brotli CLI tool.
    let window_bits: u32 = 22;
    let mut writer = brotli::CompressorWriter::new(Vec::new(), BROTLI_BUFSZ, quality, window_bits);
    writer.write_all(buf)?;
    Ok(writer.into_inner())
}
/// Decompresses brotli-compressed `buf` into a `Vec`.
///
/// An empty input yields an empty output without invoking brotli;
/// see `buf_compress()` for why an empty buffer is never handed to the decompressor.
pub(crate) fn buf_decompress(buf: &[u8]) -> Result<Vec<u8>> {
    if buf.is_empty() {
        return Ok(Vec::new());
    }
    let mut writer = brotli::DecompressorWriter::new(Vec::new(), BROTLI_BUFSZ);
    writer.write_all(buf)?;
    writer.flush()?;
    // Move the decompressed bytes out of the writer, leaving an empty Vec behind.
    Ok(mem::take(writer.get_mut()))
}
#[cfg(test)]
mod tests {
#![allow(clippy::indexing_slicing)]