Compare commits

..

202 Commits

Author SHA1 Message Date
link2xt
4f364f0433 Use tracing crate for logging 2023-03-25 17:35:56 +00:00
link2xt
381af52886 lockfile 2023-03-25 13:56:50 +00:00
link2xt
8c660ad828 Explicitly emit INFO events in stop_io() 2023-03-25 13:56:49 +00:00
link2xt
dab288936a Move last_error from Context to Events 2023-03-25 12:38:56 +00:00
link2xt
fb093253c6 Update generated nodejs constants files 2023-03-25 10:20:17 +00:00
dependabot[bot]
1864be5c55 Merge pull request #4229 from deltachat/dependabot/cargo/fuzz/openssl-0.10.48 2023-03-25 01:06:43 +00:00
dependabot[bot]
7138d44083 build(deps): bump openssl from 0.10.45 to 0.10.48 in /fuzz
Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.45 to 0.10.48.
- [Release notes](https://github.com/sfackler/rust-openssl/releases)
- [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.45...openssl-v0.10.48)

---
updated-dependencies:
- dependency-name: openssl
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-25 00:31:14 +00:00
Floris Bruynooghe
04daff0608 Add comment 2023-03-24 23:20:14 +00:00
Floris Bruynooghe
801250a9e0 feat(imex): Sort IP addresses for getting from ticket
When trying IP addresses from the ticket, have a very rough sort order
in which to try them.  Basically assume most local wifi's are
somewhere on 192.168.0.0/16 so prefer those first.
2023-03-24 23:20:14 +00:00
link2xt
20319b5426 Add --version option to deltachat-rpc-server 2023-03-24 23:11:11 +00:00
link2xt
9cca34bba5 Remove .wait_for_seen() calls in flaky tests
This is a merge commit for <https://github.com/deltachat/deltachat-core-rust/pull/4226>
2023-03-24 23:09:44 +00:00
link2xt
530981119e Update Cargo.lock 2023-03-24 23:08:46 +00:00
adbenitez
6d0327d057 deltachat-rpc-client: fix bug in Chat.send_message() 2023-03-24 18:56:08 -04:00
adbenitez
580ce5a9e9 deltachat-rpc-client: fix bug in Chat.send_message() 2023-03-24 17:34:41 -04:00
link2xt
6f327c950d Cargo.toml: replace branch with rev in default-net patch entry
This makes `cargo install --path deltachat-rpc-server` work again.
2023-03-24 21:16:04 +00:00
link2xt
92ad843ff2 Reduce test code duplication 2023-03-24 20:59:31 +00:00
link2xt
a8059c6bff python: remove flaky .wait_for_seen() from test_fetch_existing() 2023-03-24 18:09:40 +00:00
link2xt
4b468a25fe Remove pytest-rerunfailures
It is not compatible with pytest-timeout anyway:
<https://github.com/pytest-dev/pytest-rerunfailures/issues/99>
2023-03-24 16:13:33 +00:00
link2xt
1e135b649c Do not use IDLE in test_send_and_receive_message_markseen
Fix flakiness in case Dovecot sends only EXISTS
but not FETCH unsolicited response.
2023-03-24 16:09:40 +00:00
link2xt
40d32f2d0c Do not use IDLE in test_fetch_existing_msgs_group_and_single
Makes the test less flaky,
as Dovecot sometimes sends only the EXISTS response,
but not the FETCH response for flags.
2023-03-24 16:09:28 +00:00
link2xt
c9ec087cd8 python: do not use IDLE in test_markseen_message_and_mdn test
Make the test less flaky in case Dovecot notifies only about EXISTS
but not flag updates.
2023-03-24 16:09:15 +00:00
link2xt
84d79e1479 Do not use IDLE in test_mdn_asymmetric
Fixes test flakiness.
2023-03-24 16:08:58 +00:00
link2xt
83af248068 Turn more log messages into proper sentences. 2023-03-24 12:15:42 +00:00
link2xt
4f25edb1a1 Update OpenSSL to fix RUSTSEC-2023-0022
<https://rustsec.org/advisories/RUSTSEC-2023-0022>
2023-03-24 09:56:00 +00:00
link2xt
ded1634b7d python: look for "Marked messages ... as seen." by regexp
Tests reuse accounts, so UIDs may start from the number other than 1.
2023-03-24 09:43:59 +00:00
link2xt
635c73ffc6 python: use locally installed auditwheel
`auditwheel` is a part of manylinux images,
there is no need to create a virtual environment
and install it again.
2023-03-23 23:00:46 +00:00
link2xt
fcc1fe73be Do not use .wait_for_seen() in test_one_account_send_bcc_setting
.wait_for_seen() is unreliable, because sometimes Dovecot
sends only EXISTS to the IDLE connection, but not the FETCH.
Dovecot sends updates like FETCH only if some
connection has already observed the message in previous state
without the \Seen flag.

To avoid this race condition, wait until the core sets the flag,
then FETCH the message manually and check that the flag is set.
2023-03-23 22:57:35 +00:00
link2xt
fa278d50f7 Simplify layout of the deltachat-rpc-server crate 2023-03-23 22:10:28 +00:00
link2xt
2f02be4c64 ci: test the core on macOS 2023-03-23 20:46:58 +00:00
link2xt
7add1c116c Attempt to fix deltachat-rpc-server binary release uploads 2023-03-23 17:19:28 +00:00
link2xt
124a0e90e1 Release 1.112.0 2023-03-23 13:34:24 +00:00
Floris Bruynooghe
1716cdf51c ref(ffi): dc_receive_backup should block (#4211)
The documentation says this blocks.  This should block because it also
means the error reporting is more accurate by calling set_last_error
just before returning.
2023-03-23 14:15:34 +01:00
link2xt
3fdcffb314 Increase reference count before spawning background tasks in dc_receive_backup()
Merged PR <https://github.com/deltachat/deltachat-core-rust/pull/4206>
2023-03-23 08:54:33 +00:00
link2xt
f033aae25c Move most of the dc_receive_backup() into a safe function 2023-03-23 08:47:58 +00:00
Floris Bruynooghe
c42d942460 explicitly move for good measure 2023-03-23 08:47:58 +00:00
Floris Bruynooghe
0ba8201797 fix(dc_receive_backup): Increase refcount before spawn
Otherwise it is possible for the context that is used in the spawn to
be unreferenced.  Really this should be caught by the borrow checker
that ensures we only spawn things with a 'static lifetime, but we're
handling raw pointers so it doesn't.
2023-03-23 08:47:49 +00:00
iequidoo
87252ab053 cargo: bump async_zip from 0.0.9 to 0.0.11
Bumps [async_zip](https://github.com/Majored/rs-async-zip) from 0.0.9 to 0.0.11.
- [Release notes](https://github.com/Majored/rs-async-zip/releases)
- [Commits](https://github.com/Majored/rs-async-zip/compare/v0.0.9...v0.0.11)

---
updated-dependencies:
- dependency-name: async_zip
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
2023-03-23 00:33:25 +00:00
link2xt
53eec521dc Make get_all_addresses_from_header non-generic
This saves 1287 lines according to `cargo llvm-lines --release`.
2023-03-22 23:51:41 +00:00
link2xt
238570a7b9 deltachat-ffi: update rand to 0.8.5 2023-03-22 23:37:01 +00:00
link2xt
043ae48806 Set pytest-timeout on async python tests 2023-03-22 20:41:17 +00:00
link2xt
fb88f2e6ab Reintroduce pytest-rerunfailures
Tests on GitHub Actions are very flaky recently.
2023-03-22 20:39:35 +00:00
link2xt
5db867cd1b Add IMAP_INBOX_IDLE event 2023-03-22 20:20:37 +00:00
B. Petersen
ec00c160c6 add missing define 2023-03-22 20:54:52 +01:00
Floris Bruynooghe
616eabc613 feat: Make the IoPausedGuard a simple sender (#4184)
This replaces the mechanism by which the IoPauseGuard makes sure the
IO scheduler is resumed: it really is a drop guard now by sending a
single message on drop.

This makes it not have to hold on to anything like the context so
makes it a lot easier to use.

The trade-off is that a long-running task is spawned when the guard is
created, this task needs to receive the message from the drop guard in
order for the scheduler to resume.
2023-03-22 17:42:21 +01:00
Floris Bruynooghe
89b32e02c5 deps(iroh): switch back to iroh main branch (#4202) 2023-03-22 16:05:55 +01:00
Floris Bruynooghe
e985588c6c ref(jsonrpc): Getting backup provider QR code now blocks (#4198)
This changes the JSON-RPC APIs to get a QR code from the backup
provider to block.  It means once you have a (blocking) call to
provide_backup() you can call get_backup_qr() or get_backup_qr_svg()
and they will block until the QR code is available.

Calling get_backup_qr() or get_backup_qr_svg() when there is no backup
provider will immediately error.
2023-03-22 12:45:38 +01:00
link2xt
7ec3a1a9a2 ci: fixup for artifact uploading in deltachat-rpc-server.yml 2023-03-21 23:17:15 +00:00
link2xt
19fa86b276 ci: remove android dependency from deltachat-rpc-server workflow 2023-03-21 22:21:05 +00:00
link2xt
c4657991c8 ci: build all deltachat-rpc-server binaries without NDK 2023-03-21 22:17:14 +00:00
link2xt
484aebdb16 smtp: disable buffering while running STARTTLS
Otherwise TLS setup fails on macOS and iOS with `errSSLClosedAbort`.
(<https://developer.apple.com/documentation/security/errsslclosedabort>)
2023-03-21 17:57:52 +00:00
Floris Bruynooghe
9c15cd5c8f Explicitly call Context::set_last_error in ffi (#4195)
This adds a result extension trait to explicitly set the last error,
which *should* be the default for the FFI.  Currently not touching all
APIs since that's potentially disruptive and we're close to a release.

The logging story is messy, as described in the doc comment.  We
should further clean this up and tidy up these APIs so it's more
obvious to people how to do the right thing.
2023-03-21 13:37:25 +01:00
Hocuri
8302d22622 Improve comment on write_lock() (#4134) 2023-03-21 11:49:14 +01:00
bjoern
034cde9289 typo: CollectionReceived (#4189) 2023-03-21 10:21:30 +01:00
link2xt
02455d8485 ci: upload deltachat-rpc-server binaries on release 2023-03-20 18:59:14 +00:00
Floris Bruynooghe
35f50a8965 feat: Pause IO for BackupProvider (#4182)
This makes the BackupProvider automatically invoke pause-io while it
is needed.

It needed to make the guard independent from the Context lifetime to
make this work.  Which is a bit sad.
2023-03-20 19:57:17 +01:00
link2xt
e04efdbd94 tox: quiet noisy message from black 2023-03-20 17:57:38 +00:00
Hocuri
57445eedb1 More accurate maybe_add_bcc_self device message text (#4175)
* More accurate maybe_add_bcc_self device message text

* changelog

* Update src/imex.rs

Co-authored-by: bjoern <r10s@b44t.com>

* Capitalize Send Copy to Self

---------

Co-authored-by: bjoern <r10s@b44t.com>
2023-03-20 12:54:16 +01:00
link2xt
a501f10756 Get rid of duplicate uuid dependency 2023-03-20 10:07:59 +00:00
link2xt
5d80d4788c Pause I/O in get_backup() 2023-03-20 10:24:59 +01:00
link2xt
0c02886005 Update human-panic, but disable color
Avoid pulling in new `anstream` dependency
2023-03-19 19:10:25 +00:00
link2xt
24856f3050 Merge branch 'flub/send-backup'
PR: <https://github.com/deltachat/deltachat-core-rust/pull/4007>
2023-03-19 15:21:59 +00:00
link2xt
8e6434068e Fix remaining cargo-deny warnings 2023-03-19 14:40:46 +00:00
link2xt
800d2b14a5 Add cargo-deny exceptions for old crates 2023-03-19 14:37:23 +00:00
B. Petersen
3a861d2f84 some doxygen fixes 2023-03-19 15:24:51 +01:00
dependabot[bot]
4ba00f7440 Merge pull request #4171 from deltachat/dependabot/cargo/axum-0.6.11 2023-03-19 13:30:54 +00:00
link2xt
40fc61da4f changelog: add link and date to the latest release 2023-03-19 12:07:55 +00:00
link2xt
eb0f896d57 Use scheduler.is_running() 2023-03-19 11:23:09 +00:00
link2xt
71bb89fac1 Merge remote-tracking branch 'origin/master' into flub/send-backup 2023-03-19 11:10:07 +00:00
link2xt
b89199db54 Merge branch 'flub/pause-io'
PR: <https://github.com/deltachat/deltachat-core-rust/pull/4138>
2023-03-19 11:06:01 +00:00
link2xt
e39429c2e3 rustfmt 2023-03-19 10:18:49 +00:00
link2xt
17de3d3236 Remove TODOs 2023-03-19 10:17:18 +00:00
link2xt
3177f9967d Add a comment around IMAP loop task handle 2023-03-19 10:16:43 +00:00
link2xt
81418d8ee5 Log error on pause guard drop without resuming instead of working around
I checked that tests still pass even if error! is replaced with panic!
2023-03-19 10:13:59 +00:00
link2xt
a2e7d914a0 Changelog fixup 2023-03-19 09:37:09 +00:00
Floris Bruynooghe
4bf38c0e29 clippy 2023-03-19 09:36:41 +00:00
Floris Bruynooghe
0079cd4766 Add changelog 2023-03-19 09:36:38 +00:00
Floris Bruynooghe
2c3b2b8c2d move pause to only exist on Scheduler 2023-03-19 09:36:03 +00:00
Floris Bruynooghe
52fa58a3ce No need for jsonrpc to do this manually 2023-03-19 09:36:03 +00:00
Floris Bruynooghe
32a7e5ed82 Remove requirement for stopping io for imex 2023-03-19 09:36:03 +00:00
Floris Bruynooghe
097113f01e fixup paused flag use 2023-03-19 09:36:03 +00:00
Floris Bruynooghe
1d42e4743f Allow pausing IO scheduler from inside core
To handle backups the UIs have to make sure they do stop the IO
scheduler and also don't accidentally restart it while working on it.
Since they have to call start_io from a bunch of locations this can be
a bit difficult to manage.

This introduces a mechanism for the core to pause IO for some time,
which is used by the imex function.  It interacts well with other
calls to dc_start_io() and dc_stop_io() making sure that when resumed
the scheduler will be running or not as the latest calls to them.

This was a little more invasive than hoped due to the scheduler.  The
additional abstraction of the scheduler on the context seems a nice
improvement though.
2023-03-19 09:36:03 +00:00
dependabot[bot]
5ecdea47db cargo: bump axum from 0.6.9 to 0.6.11
Bumps [axum](https://github.com/tokio-rs/axum) from 0.6.9 to 0.6.11.
- [Release notes](https://github.com/tokio-rs/axum/releases)
- [Changelog](https://github.com/tokio-rs/axum/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/axum/compare/axum-v0.6.9...axum-v0.6.11)

---
updated-dependencies:
- dependency-name: axum
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-19 09:35:20 +00:00
dependabot[bot]
5b92b6355e Merge pull request #4168 from deltachat/dependabot/cargo/fuzz/libsqlite3-sys-0.25.2 2023-03-19 09:24:18 +00:00
link2xt
5eb7206b2d Format documentation comment for sync_qr_code_token_deletion 2023-03-19 00:10:45 +00:00
link2xt
a566fd6301 Upgrade async-smtp to 0.9.0
async-smtp does not implement read buffering anymore
and expects library user to implement it.

To implement read buffer, we wrap streams into BufStream
instead of BufWriter.
2023-03-18 21:26:39 +00:00
link2xt
3eadc86217 Update Rust in coredeps docker image to 1.68.0 2023-03-18 21:08:40 +00:00
dependabot[bot]
0a65081db0 Bump libsqlite3-sys from 0.24.2 to 0.25.2 in /fuzz
Bumps [libsqlite3-sys](https://github.com/rusqlite/rusqlite) from 0.24.2 to 0.25.2.
- [Release notes](https://github.com/rusqlite/rusqlite/releases)
- [Changelog](https://github.com/rusqlite/rusqlite/blob/master/Changelog.md)
- [Commits](https://github.com/rusqlite/rusqlite/compare/libsqlite3-sys-0.24.2...v0.25.2)

---
updated-dependencies:
- dependency-name: libsqlite3-sys
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-18 19:31:46 +00:00
link2xt
dd57854ee3 Increase Minimum Supported Rust Version to 1.64
It is required for clap_lex v0.3.2
and async_zip 0.0.11.
2023-03-18 19:30:11 +00:00
link2xt
b83b9db712 repl: print errors with causes 2023-03-18 14:37:50 +00:00
dignifiedquire
a59e72e7d8 update iroh 2023-03-17 23:41:36 +01:00
dignifiedquire
fd358617f5 feat: implement more detailed progress on sending 2023-03-17 23:37:00 +01:00
link2xt
b26a351786 Switch quinn to the main branch
It has Android fixes merged
2023-03-17 22:26:42 +00:00
link2xt
a32a3b8cca Construct HashMaps in provider database from array
This saves 450 lines according to `cargo llvm-lines --release`.
2023-03-17 18:30:37 +00:00
dignifiedquire
575b43d9a0 Merge remote-tracking branch 'origin/master' into flub/send-backup 2023-03-17 15:53:40 +01:00
Friedel Ziegelmayer
6c5654f584 fix: do not delete columns
This requires currently too much memory, crashing on larger instances
2023-03-17 15:35:08 +01:00
link2xt
0a5542a698 Log transfer rate on success 2023-03-17 10:45:43 +00:00
dignifiedquire
518bd19e96 fix: do not block transfer on db import 2023-03-17 11:29:27 +01:00
link2xt
961e3ad7e2 Update spin 0.9.5->0.9.6 2023-03-17 00:00:37 +00:00
dignifiedquire
7a49e9401f fixup clippy & fmt 2023-03-16 17:53:27 +01:00
dignifiedquire
3701936129 Merge remote-tracking branch 'origin/master' into flub/send-backup 2023-03-16 17:50:00 +01:00
dignifiedquire
c02686b56e update iroh 2023-03-16 17:38:15 +01:00
link2xt
9a7ff9d2b1 Switch quinn to ecn-einval-fallback branch. 2023-03-16 16:25:46 +00:00
link2xt
56f6d6849e Patch quinn to work on android 2023-03-15 12:45:58 +00:00
link2xt
cbc18ee5a4 Log connection errors 2023-03-14 18:49:14 +00:00
dignifiedquire
14521cfc2d improve address handling 2023-03-14 15:38:28 +01:00
dignifiedquire
5e4807b7ac update patched default-net 2023-03-13 12:51:54 +01:00
link2xt
28d9bec0b4 Patch default-net 2023-03-10 17:14:01 +00:00
Floris Bruynooghe
05e50ea787 Connect to all addresses the provider has
This uses a branch directly from iroh repo again
2023-03-09 16:49:34 +01:00
Floris Bruynooghe
02afacf989 clarify docs 2023-03-09 16:12:33 +01:00
Floris Bruynooghe
c7de4f66e7 Bind to 0.0.0.0 2023-03-09 15:34:15 +01:00
Floris Bruynooghe
c9b8c5079b wording 2023-03-07 15:45:18 +01:00
Floris Bruynooghe
eec5ae96e8 Update docs and fix string allocation
The docs say you should always unref the string and NULL is never
returned.  The implementation should follow that.
2023-03-07 15:36:33 +01:00
Floris Bruynooghe
4b94eadf5e typo 2023-03-07 14:40:51 +01:00
Floris Bruynooghe
52a1886937 naming conventions!
they're hard
2023-03-07 14:40:01 +01:00
Floris Bruynooghe
9767f51c3d update .h file too 2023-03-07 14:13:42 +01:00
Floris Bruynooghe
6674b888cc Merge branch 'master' into flub/send-backup 2023-03-07 12:52:45 +01:00
Floris Bruynooghe
a5e6bd3e8e Do not require context for non-context methods
This follows the ffi style better.
2023-03-07 12:49:42 +01:00
Floris Bruynooghe
b6c24932a7 Apply typos from code review
Co-authored-by: Hocuri <hocuri@gmx.de>
2023-03-07 12:23:30 +01:00
Floris Bruynooghe
d73d56c399 bump testdir for windows bug workaround 2023-03-03 13:13:58 +01:00
Floris Bruynooghe
731e90f0d5 update cargo-deny 2023-03-03 12:53:43 +01:00
Floris Bruynooghe
e0a6c2ef54 Merge branch 'master' into flub/send-backup 2023-03-03 12:46:05 +01:00
Floris Bruynooghe
c5408e0561 Merge branch 'master' into flub/send-backup 2023-03-03 09:48:33 +01:00
Floris Bruynooghe
c1a2df91ac Fix typo in blob names
This is now tested properly too.
2023-03-02 21:53:13 +01:00
Floris Bruynooghe
da85c2412e fix iterator 2023-03-02 21:48:14 +01:00
Floris Bruynooghe
d108f9b3e3 clippy 2023-03-02 11:47:03 +01:00
Floris Bruynooghe
e3014a349c Merge branch 'master' into flub/send-backup 2023-03-02 11:35:17 +01:00
Floris Bruynooghe
9d88ef069e log some more 2023-03-02 11:21:05 +01:00
Floris Bruynooghe
155dff2813 renaming of upstream 2023-03-02 11:18:30 +01:00
Floris Bruynooghe
38d4ea8514 Use std::slice::Iter instead of manually tracking the offset 2023-03-02 11:15:02 +01:00
Floris Bruynooghe
6f24874eb8 Use a RAII guard to remove the db export 2023-03-02 10:58:39 +01:00
Floris Bruynooghe
2d20812652 some typos 2023-03-02 09:39:50 +01:00
Floris Bruynooghe
5762fbb9a7 Allow JSON-RPC to get text of QR code as well
Desktop does use this as it allows reading QR codes as text from the
clipboard as well as copying the QR text to the clipboard instead of
showing the QR code.
2023-03-01 11:27:21 +01:00
Floris Bruynooghe
0e06da22df fix symbol name 2023-02-22 18:51:17 +01:00
Floris Bruynooghe
5833a9b347 fix doc comments 2023-02-22 18:50:32 +01:00
Floris Bruynooghe
0ef8d57881 Merge branch 'master' into flub/send-backup 2023-02-22 18:15:23 +01:00
Floris Bruynooghe
fc64c33368 Use released version of sendme^Wiroh
This switches to a released version.  It has been renamed from sendme
to iroh.
2023-02-22 16:05:24 +01:00
Floris Bruynooghe
1b39be8a42 Merge branch 'master' into flub/send-backup 2023-02-22 15:54:23 +01:00
Floris Bruynooghe
a1e19e2c41 Merge branch 'master' into flub/send-backup 2023-02-20 17:39:52 +01:00
Floris Bruynooghe
b920db12c7 Split _wait and _unref
This also removes BackupProvider::join in favour of implementing
Future directly.  I wondered about implementing a FusedFuture to make
this a little safer but it would introduce a dependency on the futures
crate in deltachat-ffi which did not exist yet, so I didn't do that.
2023-02-20 15:56:05 +01:00
Floris Bruynooghe
73b90eee3e improve docs 2023-02-20 13:10:29 +01:00
Floris Bruynooghe
4637a28bf6 doc comment 2023-02-20 13:08:43 +01:00
Floris Bruynooghe
d0638c1542 typo 2023-02-20 13:05:11 +01:00
Floris Bruynooghe
788d3125a3 Do not save svg to file, just print qr text 2023-02-20 13:02:16 +01:00
Floris Bruynooghe
3c4ffc3550 Some fixes 2023-02-20 12:58:23 +01:00
Floris Bruynooghe
ada858f439 Improve comments, mostly ffi. and some renames 2023-02-20 12:48:43 +01:00
Floris Bruynooghe
f2570945c6 Don't reimplement qr::format_backup 2023-02-16 18:18:18 +01:00
Floris Bruynooghe
8072f78058 Do not emit ImexEvent From BlobDirIter
We no longer need that in the transfer case, that would give very
weird results.  This also means there is nothing imex-specific about
this anymore so move it to blobs.rs
2023-02-16 18:05:09 +01:00
Floris Bruynooghe
8ae0ee5a67 Merge branch 'master' into flub/send-backup 2023-02-16 17:19:31 +01:00
Floris Bruynooghe
a75d2b1c80 Create a blocking call for jsonrpc 2023-02-16 17:15:54 +01:00
Floris Bruynooghe
c48c2af7a1 Allow retrieval of backup QR on context
This enables being able to get the QR code without needing to have
access to the BackupProvider itself.  This is useful for the JSON-RPC
server.
2023-02-16 16:49:20 +01:00
Floris Bruynooghe
490a14c5ef Remove the need for a directory for db export
Plus on import use the context directory.  We can actually write there
just fine.
2023-02-16 16:06:41 +01:00
Floris Bruynooghe
dcce6ef50b Some docs 2023-02-16 15:19:44 +01:00
Floris Bruynooghe
7cf0820d2b diff 2023-02-16 14:56:18 +01:00
Floris Bruynooghe
0bae3caaff dear CI masters: I regret ever trying to be clever 2023-02-16 14:55:24 +01:00
Floris Bruynooghe
bca0b256c9 goodness ci? 2023-02-16 14:52:05 +01:00
Floris Bruynooghe
a53d30c459 fixed another bug, try main again 2023-02-16 14:49:48 +01:00
Floris Bruynooghe
7a9f497aa7 why can't i see this action now? 2023-02-16 14:49:04 +01:00
Floris Bruynooghe
f9f9bc3efb yaml 2023-02-16 09:10:47 +01:00
Floris Bruynooghe
904990bf91 ugh, yaml syntax 2023-02-16 09:08:14 +01:00
Floris Bruynooghe
b2266ffca1 make the have a valid on spec at least so gh doesn't complain too much 2023-02-16 09:06:40 +01:00
Floris Bruynooghe
bb9a3d4b8e more bug hunting: disable most ci, point to branch 2023-02-16 09:00:27 +01:00
Floris Bruynooghe
e565e19b42 fix msrv in sendme 2023-02-15 16:01:39 +01:00
Floris Bruynooghe
41319c85c7 patch in previous revision of sendme
main broke rust 1.63 support :'(
2023-02-15 15:12:14 +01:00
Floris Bruynooghe
daf56804a5 use correct branch 2023-02-15 14:57:26 +01:00
Floris Bruynooghe
6f7a43804d Add changelog 2023-02-15 14:48:17 +01:00
Floris Bruynooghe
0ca76d36ef Merge branch 'master' into flub/send-backup 2023-02-15 14:46:57 +01:00
Floris Bruynooghe
ec5789997a back to master 2023-02-15 14:45:52 +01:00
Floris Bruynooghe
7a0d61bbb0 hey 2023-02-15 13:50:39 +01:00
Floris Bruynooghe
1c2461974d better way 2023-02-14 18:29:15 +01:00
Floris Bruynooghe
2a754744fe char, not chat 2023-02-14 18:03:39 +01:00
Floris Bruynooghe
b413593c43 hi 2023-02-14 17:39:58 +01:00
Floris Bruynooghe
c73edd7e21 oh 2023-02-14 17:20:25 +01:00
Floris Bruynooghe
a34a69d8e4 yes, ci fun 2023-02-14 17:15:52 +01:00
Floris Bruynooghe
020a9d33f6 new sendme for lower msrv 2023-02-14 15:49:21 +01:00
Floris Bruynooghe
19f6f89312 no let else :( 2023-02-14 13:29:55 +01:00
Floris Bruynooghe
d56e05a11a fixup doc links 2023-02-14 13:27:15 +01:00
Floris Bruynooghe
c379a4e5a7 use ProgressEmitter from sendme 2023-02-14 13:19:43 +01:00
Floris Bruynooghe
44c1efe4e4 Add jsonrpc support 2023-02-14 13:05:54 +01:00
Floris Bruynooghe
ff0d675082 Make getting backup use the ongoing process 2023-02-14 12:19:40 +01:00
Floris Bruynooghe
e1087b4145 translate the string for qr code 2023-02-14 12:07:02 +01:00
Floris Bruynooghe
323535584b implement ffi and use public sendme 2023-02-13 18:25:12 +01:00
Floris Bruynooghe
852adbe514 bits left over from master merge 2023-02-13 15:45:38 +01:00
Floris Bruynooghe
4c78553d90 Merge branch 'master' into flub/send-backup 2023-02-13 11:25:51 +01:00
Floris Bruynooghe
a31ae5297a Add to repl example 2023-02-10 18:35:47 +01:00
Floris Bruynooghe
e7792a0c65 clippy 2023-02-10 18:27:03 +01:00
Floris Bruynooghe
3c32de1859 Generate a QR code 2023-02-10 18:16:01 +01:00
Floris Bruynooghe
6a3fe3db92 fixup doc comments 2023-02-10 14:15:39 +01:00
Floris Bruynooghe
ac048c154d Add progress for provider
Fix progress for getter.  Maths.  It's hard.

Add test for progress.
2023-02-10 13:54:50 +01:00
Floris Bruynooghe
3f51a8ffc2 Some more doc comments 2023-02-10 10:48:10 +01:00
Floris Bruynooghe
2129b2b7a0 Add a ton of code for receiver-side progress 2023-02-09 18:09:16 +01:00
Floris Bruynooghe
3734fc25a7 update callback to take collection by ref 2023-02-09 10:02:18 +01:00
Floris Bruynooghe
05ddc13054 Use name prefixes so the db can not be spoofed by a blob 2023-02-07 18:21:46 +01:00
Floris Bruynooghe
716504b833 do not pull in sendme cli deps 2023-02-07 17:20:35 +01:00
Floris Bruynooghe
187861c3b2 Make stuff work. With test! 2023-02-07 17:18:34 +01:00
Floris Bruynooghe
0b075ac762 Stop after a transfer happened. 2023-02-06 14:58:08 +01:00
Floris Bruynooghe
a6c889ed5e Clean up files on errors 2023-02-02 18:11:12 +01:00
Floris Bruynooghe
ca1533b0e4 delete device messages 2023-02-02 17:47:41 +01:00
Floris Bruynooghe
3267596a30 handle the database 2023-02-02 17:43:12 +01:00
Floris Bruynooghe
5f29b93970 Start of get support and create new module. 2023-02-02 17:15:23 +01:00
Floris Bruynooghe
2a6a21c33a handle the ongoing process correctly 2023-02-01 17:53:23 +01:00
Floris Bruynooghe
059af398eb Allow decoding the QR code 2023-02-01 17:06:07 +01:00
Floris Bruynooghe
6044e5961b Send and receive backup over network using QR code
This adds functionality to send and receive a backup over the network
using a QR code.

The sender or provider prepares the backup, sets up a server that
waits for clients.  It provides a ticket in the form of a QR code
which contains connection and authentication information.

The receiver uses the QR code to connect to the provider and fetches
backup, restoring it locally.
2023-02-01 16:45:09 +01:00
103 changed files with 6702 additions and 2645 deletions

View File

@@ -77,14 +77,17 @@ jobs:
- os: windows-latest
rust: 1.68.0
python: false # Python bindings compilation on Windows is not supported.
- os: macos-latest
rust: 1.68.0
python: 3.9
# Minimum Supported Rust Version = 1.63.0
# Minimum Supported Rust Version = 1.64.0
#
# Minimum Supported Python Version = 3.7
# This is the minimum version for which manylinux Python wheels are
# built.
- os: ubuntu-latest
rust: 1.63.0
rust: 1.64.0
python: 3.7
runs-on: ${{ matrix.os }}
steps:

View File

@@ -8,35 +8,14 @@ concurrency:
on:
workflow_dispatch:
release:
types: [published]
jobs:
# Build a version statically linked against musl libc
# to avoid problems with glibc version incompatibility.
build_static_linux:
name: Build deltachat-rpc-server for Linux
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Setup rust target
run: rustup target add x86_64-unknown-linux-musl
- name: Install musl-gcc
run: sudo apt install musl-tools
- name: Build
env:
RUSTFLAGS: "-C link-arg=-s"
run: cargo build --release --target x86_64-unknown-linux-musl -p deltachat-rpc-server --features vendored
- name: Upload binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-x86_64
path: target/x86_64-unknown-linux-musl/release/deltachat-rpc-server
if-no-files-found: error
build_linux:
name: Cross-compile deltachat-rpc-server for aarch64 and armv7 Linux
name: Cross-compile deltachat-rpc-server for x86_64, aarch64 and armv7 Linux
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
@@ -44,6 +23,13 @@ jobs:
- name: Build
run: sh scripts/zig-rpc-server.sh
- name: Upload x86_64 binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-x86_64
path: target/x86_64-unknown-linux-musl/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload aarch64 binary
uses: actions/upload-artifact@v3
with:
@@ -58,50 +44,6 @@ jobs:
path: target/armv7-unknown-linux-musleabihf/release/deltachat-rpc-server
if-no-files-found: error
build_android:
name: Cross-compile deltachat-rpc-server for Android (armeabi-v7a, arm64-v8a, x86 and x86_64)
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: nttld/setup-ndk@v1
id: setup-ndk
with:
ndk-version: r21d
- name: Build
env:
ANDROID_NDK_ROOT: ${{ steps.setup-ndk.outputs.ndk-path }}
run: sh scripts/android-rpc-server.sh
- name: Upload binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-android-armv7
path: target/armv7-linux-androideabi/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-android-aarch64
path: target/aarch64-linux-android/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-android-i686
path: target/i686-linux-android/release/deltachat-rpc-server
if-no-files-found: error
- name: Upload binary
uses: actions/upload-artifact@v3
with:
name: deltachat-rpc-server-android-x86_64
path: target/x86_64-linux-android/release/deltachat-rpc-server
if-no-files-found: error
build_windows:
name: Build deltachat-rpc-server for Windows
strategy:
@@ -134,3 +76,31 @@ jobs:
name: deltachat-rpc-server-${{ matrix.artifact }}
path: target/${{ matrix.target}}/release/${{ matrix.path }}
if-no-files-found: error
publish:
name: Upload binaries to the release
needs: ["build_linux", "build_windows"]
permissions:
contents: write
runs-on: "ubuntu-latest"
steps:
- name: Download built binaries
uses: "actions/download-artifact@v3"
- name: Compose dist/ directory
run: |
mkdir dist
for x in x86_64 aarch64 armv7 win32.exe win64.exe; do
mv "deltachat-rpc-server-$x"/* "dist/deltachat-rpc-server-$x"
done
- name: List downloaded artifacts
run: ls -l dist/
- name: Upload binaries to the GitHub release
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
run: |
gh release upload ${{ github.ref_name }} \
--repo ${{ github.repository }} \
dist/*

View File

@@ -1,12 +1,28 @@
# Changelog
## Unreleased
## [Unreleased]
### Changes
- Drop unused SQL columns #4141
- "full message view" not needed because of footers that go to contact status #4151
- Pick up system's light/dark mode in generated message HTML #4150
- Support non-persistent configuration with DELTACHAT_* env
- Add support for `--version` argument to `deltachat-rpc-server`. #4224
It can be used to check the installed version without starting the server.
### Fixes
- deltachat-rpc-client: fix bug in `Chat.send_message()`: invalid `MessageData` field `quotedMsg` instead of `quotedMsgId`
## [1.112.0] - 2023-03-23
### Changes
- Increase MSRV to 1.64. #4167
- Core takes care of stopping and re-starting IO itself where needed,
e.g. during backup creation.
It is no longer needed to call `dc_stop_io()`.
`dc_start_io()` can now be called at any time without harm. #4138
- Pick up system's light/dark mode in generated message HTML. #4150
- More accurate `maybe_add_bcc_self` device message text. #4175
- "Full message view" not needed because of footers that go to contact status. #4151
- Support non-persistent configuration with `DELTACHAT_*` env. #4154
- Print deltachat-repl errors with causes. #4166
### Fixes
- Fix segmentation fault if `dc_context_unref()` is called during
@@ -15,9 +31,17 @@
during handling the JSON-RPC request. #4153
- Delete expired messages using multiple SQL requests. #4158
- Do not emit "Failed to run incremental vacuum" warnings on success. #4160
- Ability to send backup over network and QR code to setup second device #4007
- Disable buffering during STARTTLS setup. #4190
- Add `DC_EVENT_IMAP_INBOX_IDLE` event to wait until the account
is ready for testing.
It is used to fix race condition between fetching
existing messages and starting the test. #4208
### API-Changes
- Use `tracing` crate for logging. #3960
## 1.111.0
## [1.111.0] - 2023-03-05
### Changes
- Make smeared timestamp generation non-async. #4075
@@ -2306,3 +2330,7 @@
For a full list of changes, please see our closed Pull Requests:
https://github.com/deltachat/deltachat-core-rust/pulls?q=is%3Apr+is%3Aclosed
[unreleased]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.0...HEAD
[1.111.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.110.0...v1.111.0
[1.112.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.111.0...v1.112.0

1617
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,9 @@
[package]
name = "deltachat"
version = "1.111.0"
version = "1.112.0"
edition = "2021"
license = "MPL-2.0"
rust-version = "1.63"
rust-version = "1.64"
[profile.dev]
debug = 0
@@ -24,6 +24,11 @@ lto = true
panic = 'abort'
opt-level = "z"
[patch.crates-io]
default-net = { git = "https://github.com/dignifiedquire/default-net.git", rev="7a257095bac009c4be0b93c2979801624fdd337b" }
quinn-udp = { git = "https://github.com/quinn-rs/quinn", branch="main" }
quinn-proto = { git = "https://github.com/quinn-rs/quinn", branch="main" }
[dependencies]
deltachat_derive = { path = "./deltachat_derive" }
format-flowed = { path = "./format-flowed" }
@@ -33,8 +38,8 @@ anyhow = "1"
async-channel = "1.8.0"
async-imap = { git = "https://github.com/async-email/async-imap", branch = "master", default-features = false, features = ["runtime-tokio"] }
async-native-tls = { version = "0.5", default-features = false, features = ["runtime-tokio"] }
async-smtp = { version = "0.8", default-features = false, features = ["runtime-tokio"] }
async_zip = { version = "0.0.9", default-features = false, features = ["deflate"] }
async-smtp = { version = "0.9", default-features = false, features = ["runtime-tokio"] }
async_zip = { version = "0.0.11", default-features = false, features = ["deflate", "fs"] }
backtrace = "0.3"
base64 = "0.21"
bitflags = "1.3"
@@ -48,6 +53,8 @@ futures-lite = "1.12.0"
hex = "0.4.0"
humansize = "2"
image = { version = "0.24.5", default-features=false, features = ["gif", "jpeg", "ico", "png", "pnm", "webp", "bmp"] }
# iroh = { version = "0.3.0", default-features = false }
iroh = { git = 'https://github.com/n0-computer/iroh', branch = "main" }
kamadak-exif = "0.5"
lettre_email = { git = "https://github.com/deltachat/lettre", branch = "master" }
libc = "0.2"
@@ -59,8 +66,12 @@ once_cell = "1.17.0"
percent-encoding = "2.2"
parking_lot = "0.12"
pgp = { version = "0.9", default-features = false }
pin-project-lite = { version = "0.2.9" }
pretty_env_logger = { version = "0.4", optional = true }
qrcodegen = "1.7.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", default-features=false, features = ["std", "env-filter", "registry", "fmt"] }
tracing-futures = "0.2"
quick-xml = "0.27"
rand = "0.8"
regex = "1.7"
@@ -91,10 +102,9 @@ uuid = { version = "1", features = ["serde", "v4"] }
ansi_term = "0.12.0"
criterion = { version = "0.4.0", features = ["async_tokio"] }
futures-lite = "1.12"
log = "0.4"
pretty_env_logger = "0.4"
proptest = { version = "1", default-features = false, features = ["std"] }
tempfile = "3"
testdir = "0.7.2"
tokio = { version = "1", features = ["parking_lot", "rt-multi-thread", "macros"] }
[workspace]

View File

@@ -19,7 +19,7 @@ $ curl https://sh.rustup.rs -sSf | sh
Compile and run Delta Chat Core command line utility, using `cargo`:
```
$ RUST_LOG=deltachat_repl=info cargo run -p deltachat-repl -- ~/deltachat-db
$ cargo run -p deltachat-repl -- ~/deltachat-db
```
where ~/deltachat-db is the database file. Delta Chat will create it if it does not exist.

View File

@@ -2,15 +2,15 @@ use criterion::{criterion_group, criterion_main, Criterion};
use deltachat::context::Context;
use deltachat::stock_str::StockStrings;
use deltachat::{info, Event, EventType, Events};
use deltachat::{Event, EventType, Events};
use tempfile::tempdir;
async fn send_events_benchmark(context: &Context) {
let emitter = context.get_event_emitter();
for _i in 0..1_000_000 {
info!(context, "interesting event...");
context.emit_event(EventType::Info("interesting event...".to_string()));
}
info!(context, "DONE");
context.emit_event(EventType::Info("DONE".to_string()));
loop {
match emitter.recv().await.unwrap() {

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat_ffi"
version = "1.111.0"
version = "1.112.0"
description = "Deltachat FFI"
edition = "2018"
readme = "README.md"
@@ -17,14 +17,16 @@ crate-type = ["cdylib", "staticlib"]
deltachat = { path = "../", default-features = false }
deltachat-jsonrpc = { path = "../deltachat-jsonrpc", optional = true }
libc = "0.2"
human-panic = "1"
human-panic = { version = "1", default-features = false }
num-traits = "0.2"
serde_json = "1.0"
tokio = { version = "1", features = ["rt-multi-thread"] }
anyhow = "1"
thiserror = "1"
rand = "0.7"
rand = "0.8"
once_cell = "1.17.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", default-features=false, features = ["std", "registry"] }
[features]
default = ["vendored"]

View File

@@ -24,6 +24,7 @@ typedef struct _dc_provider dc_provider_t;
typedef struct _dc_event dc_event_t;
typedef struct _dc_event_emitter dc_event_emitter_t;
typedef struct _dc_jsonrpc_instance dc_jsonrpc_instance_t;
typedef struct _dc_backup_provider dc_backup_provider_t;
// Alias for backwards compatibility, use dc_event_emitter_t instead.
typedef struct _dc_event_emitter dc_accounts_event_emitter_t;
@@ -2100,8 +2101,7 @@ dc_contact_t* dc_get_contact (dc_context_t* context, uint32_t co
/**
* Import/export things.
* During backup import/export IO must not be started,
* if needed stop IO using dc_accounts_stop_io() or dc_stop_io() first.
*
* What to do is defined by the _what_ parameter which may be one of the following:
*
* - **DC_IMEX_EXPORT_BACKUP** (11) - Export a backup to the directory given as `param1`
@@ -2295,6 +2295,7 @@ void dc_stop_ongoing_process (dc_context_t* context);
#define DC_QR_FPR_MISMATCH 220 // id=contact
#define DC_QR_FPR_WITHOUT_ADDR 230 // test1=formatted fingerprint
#define DC_QR_ACCOUNT 250 // text1=domain
#define DC_QR_BACKUP 251
#define DC_QR_WEBRTC_INSTANCE 260 // text1=domain, text2=instance pattern
#define DC_QR_ADDR 320 // id=contact
#define DC_QR_TEXT 330 // text1=text
@@ -2320,7 +2321,7 @@ void dc_stop_ongoing_process (dc_context_t* context);
* ask whether to verify the contact;
* if so, start the protocol with dc_join_securejoin().
*
* - DC_QR_ASK_VERIFYGROUP withdc_lot_t::text1=Group name:
* - DC_QR_ASK_VERIFYGROUP with dc_lot_t::text1=Group name:
* ask whether to join the group;
* if so, start the protocol with dc_join_securejoin().
*
@@ -2340,6 +2341,10 @@ void dc_stop_ongoing_process (dc_context_t* context);
* ask the user if they want to create an account on the given domain,
* if so, call dc_set_config_from_qr() and then dc_configure().
*
* - DC_QR_BACKUP:
* ask the user if they want to set up a new device.
* If so, pass the qr-code to dc_receive_backup().
*
* - DC_QR_WEBRTC_INSTANCE with dc_lot_t::text1=domain:
* ask the user if they want to use the given service for video chats;
* if so, call dc_set_config_from_qr().
@@ -2630,6 +2635,117 @@ char* dc_get_last_error (dc_context_t* context);
void dc_str_unref (char* str);
/**
* @class dc_backup_provider_t
*
* Set up another device.
*/
/**
* Creates an object for sending a backup to another device.
*
* The backup is sent through a peer-to-peer channel which is bootstrapped
* by a QR-code. The backup contains the entire state of the account
* including credentials. This can be used to setup a new device.
*
* This is a blocking call as some preparations are made like e.g. exporting
* the database. Once this function returns, the backup is being offered to
* remote devices. To wait until one device received the backup, use
* dc_backup_provider_wait(). Alternatively abort the operation using
* dc_stop_ongoing_process().
*
* During execution of the job #DC_EVENT_IMEX_PROGRESS is sent out to indicate
* state and progress.
*
* @memberof dc_backup_provider_t
* @param context The context.
* @return Opaque object for sending the backup.
* On errors, NULL is returned and dc_get_last_error() returns an error that
* should be shown to the user.
*/
dc_backup_provider_t* dc_backup_provider_new (dc_context_t* context);
/**
* Returns the QR code text that will offer the backup to other devices.
*
* The QR code contains a ticket which will validate the backup and provide
* authentication for both the provider and the recipient.
*
* The scanning device should pass the scanned text to dc_check_qr(). If
* dc_check_qr() returns DC_QR_BACKUP, the backup transfer can be started using
* dc_receive_backup().
*
* @memberof dc_backup_provider_t
* @param backup_provider The backup provider object as created by
* dc_backup_provider_new().
* @return The text that should be put in the QR code.
* On errors an empty string is returned, NULL is never returned.
* the returned string must be released using dc_str_unref() after usage.
*/
char* dc_backup_provider_get_qr (const dc_backup_provider_t* backup_provider);
/**
* Returns the QR code SVG image that will offer the backup to other devices.
*
* This works like dc_backup_provider_get_qr() but returns the text of a rendered
* SVG image containing the QR code.
*
* @memberof dc_backup_provider_t
* @param backup_provider The backup provider object as created by
* dc_backup_provider_new().
* @return The QR code rendered as SVG.
* On errors an empty string is returned, NULL is never returned.
* the returned string must be released using dc_str_unref() after usage.
*/
char* dc_backup_provider_get_qr_svg (const dc_backup_provider_t* backup_provider);
/**
* Waits for the sending to finish.
*
* This is a blocking call and should only be called once.
*
* @memberof dc_backup_provider_t
* @param backup_provider The backup provider object as created by
* dc_backup_provider_new(). If NULL is given nothing is done.
*/
void dc_backup_provider_wait (dc_backup_provider_t* backup_provider);
/**
* Frees a dc_backup_provider_t object.
*
* @memberof dc_backup_provider_t
* @param backup_provider The backup provider object as created by
* dc_backup_provider_new().
*/
void dc_backup_provider_unref (dc_backup_provider_t* backup_provider);
/**
* Gets a backup offered by a dc_backup_provider_t object on another device.
*
* This function is called on a device that scanned the QR code offered by
* dc_backup_provider_get_qr() or dc_backup_provider_get_qr_svg(). Typically this is a
* different device than that which provides the backup.
*
* This call will block while the backup is being transferred and only
* complete on success or failure. Use dc_stop_ongoing_process() to abort it
* early.
*
* During execution of the job #DC_EVENT_IMEX_PROGRESS is sent out to indicate
* state and progress. The process is finished when the event emits either 0
* or 1000, 0 means it failed and 1000 means it succeeded. These events are
* for showing progress and informational only, success and failure is also
* shown in the return code of this function.
*
* @memberof dc_context_t
* @param context The context.
* @param qr The qr code text, dc_check_qr() must have returned DC_QR_BACKUP
* on this text.
* @return 0=failure, 1=success.
*/
int dc_receive_backup (dc_context_t* context, const char* qr);
/**
* @class dc_accounts_t
*
@@ -5589,6 +5705,14 @@ void dc_event_unref(dc_event_t* event);
*/
#define DC_EVENT_IMAP_MESSAGE_MOVED 105
/**
* Emitted before going into IDLE on the Inbox folder.
*
* @param data1 0
* @param data2 0
*/
#define DC_EVENT_IMAP_INBOX_IDLE 106
/**
* Emitted when a new blob file was successfully written
*
@@ -6874,6 +6998,11 @@ void dc_event_unref(dc_event_t* event);
/// `%1$s` will be replaced by name and address of the contact.
#define DC_STR_PROTECTION_DISABLED_BY_OTHER 161
/// "Scan to set up second device for %1$s"
///
/// `%1$s` will be replaced by name and address of the account.
#define DC_STR_BACKUP_TRANSFER_QR 162
/**
* @}
*/

File diff suppressed because it is too large Load Diff

View File

@@ -14,6 +14,8 @@ use crate::summary::{Summary, SummaryPrefix};
/// eg. by chatlist.get_summary() or dc_msg_get_summary().
///
/// *Lot* is used in the meaning *heap* here.
// The QR code grew too large. So be it.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum Lot {
Summary(Summary),
@@ -47,6 +49,7 @@ impl Lot {
Qr::FprMismatch { .. } => None,
Qr::FprWithoutAddr { fingerprint, .. } => Some(fingerprint),
Qr::Account { domain } => Some(domain),
Qr::Backup { .. } => None,
Qr::WebrtcInstance { domain, .. } => Some(domain),
Qr::Addr { draft, .. } => draft.as_deref(),
Qr::Url { url } => Some(url),
@@ -98,6 +101,7 @@ impl Lot {
Qr::FprMismatch { .. } => LotState::QrFprMismatch,
Qr::FprWithoutAddr { .. } => LotState::QrFprWithoutAddr,
Qr::Account { .. } => LotState::QrAccount,
Qr::Backup { .. } => LotState::QrBackup,
Qr::WebrtcInstance { .. } => LotState::QrWebrtcInstance,
Qr::Addr { .. } => LotState::QrAddr,
Qr::Url { .. } => LotState::QrUrl,
@@ -122,6 +126,7 @@ impl Lot {
Qr::FprMismatch { contact_id } => contact_id.unwrap_or_default().to_u32(),
Qr::FprWithoutAddr { .. } => Default::default(),
Qr::Account { .. } => Default::default(),
Qr::Backup { .. } => Default::default(),
Qr::WebrtcInstance { .. } => Default::default(),
Qr::Addr { contact_id, .. } => contact_id.to_u32(),
Qr::Url { .. } => Default::default(),
@@ -170,6 +175,8 @@ pub enum LotState {
/// text1=domain
QrAccount = 250,
QrBackup = 251,
/// text1=domain, text2=instance pattern
QrWebrtcInstance = 260,

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-jsonrpc"
version = "1.111.0"
version = "1.112.0"
description = "DeltaChat JSON-RPC API"
edition = "2021"
default-run = "deltachat-jsonrpc-server"
@@ -29,7 +29,7 @@ walkdir = "2.3.2"
base64 = "0.21"
# optional dependencies
axum = { version = "0.6.6", optional = true, features = ["ws"] }
axum = { version = "0.6.11", optional = true, features = ["ws"] }
env_logger = { version = "0.10.0", optional = true }
[dev-dependencies]

View File

@@ -47,6 +47,9 @@ pub enum JSONRPCEventType {
msg: String,
},
/// Emitted before going into IDLE on the Inbox folder.
ImapInboxIdle,
/// Emitted when a new file in the $BLOBDIR was created
NewBlobFile {
file: String,
@@ -293,6 +296,7 @@ impl From<EventType> for JSONRPCEventType {
EventType::SmtpMessageSent(msg) => SmtpMessageSent { msg },
EventType::ImapMessageDeleted(msg) => ImapMessageDeleted { msg },
EventType::ImapMessageMoved(msg) => ImapMessageMoved { msg },
EventType::ImapInboxIdle => ImapInboxIdle,
EventType::NewBlobFile(file) => NewBlobFile { file },
EventType::DeletedBlobFile(file) => DeletedBlobFile { file },
EventType::Warning(msg) => Warning { msg },

File diff suppressed because it is too large Load Diff

View File

@@ -32,6 +32,9 @@ pub enum QrObject {
Account {
domain: String,
},
Backup {
ticket: String,
},
WebrtcInstance {
domain: String,
instance_pattern: String,
@@ -126,6 +129,9 @@ impl From<Qr> for QrObject {
}
Qr::FprWithoutAddr { fingerprint } => QrObject::FprWithoutAddr { fingerprint },
Qr::Account { domain } => QrObject::Account { domain },
Qr::Backup { ticket } => QrObject::Backup {
ticket: ticket.to_string(),
},
Qr::WebrtcInstance {
domain,
instance_pattern,

View File

@@ -55,5 +55,5 @@
},
"type": "module",
"types": "dist/deltachat.d.ts",
"version": "1.111.0"
"version": "1.112.0"
}

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-repl"
version = "1.111.0"
version = "1.112.0"
license = "MPL-2.0"
edition = "2021"
@@ -14,6 +14,9 @@ pretty_env_logger = "0.4"
rusqlite = "0.28"
rustyline = "11"
tokio = { version = "1", features = ["fs", "rt-multi-thread", "macros"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", default-features=false, features = ["std", "env-filter", "registry", "fmt"] }
tracing-log = "0.1.3"
[features]
default = ["vendored"]

View File

@@ -336,6 +336,8 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
has-backup\n\
export-backup\n\
import-backup <backup-file>\n\
send-backup\n\
receive-backup <qr>\n\
export-keys\n\
import-keys\n\
export-setup\n\
@@ -463,7 +465,7 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
continue_key_transfer(&context, MsgId::new(arg1.parse()?), arg2).await?;
}
"has-backup" => {
has_backup(&context, blobdir).await?;
has_backup(blobdir).await?;
}
"export-backup" => {
let dir = dirs::home_dir().unwrap_or_default();
@@ -486,6 +488,17 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
)
.await?;
}
"send-backup" => {
let provider = BackupProvider::prepare(&context).await?;
let qr = provider.qr();
println!("QR code: {}", format_backup(&qr)?);
provider.await?;
}
"receive-backup" => {
ensure!(!arg1.is_empty(), "Argument <qr> is missing.");
let qr = check_qr(&context, arg1).await?;
deltachat::imex::get_backup(&context, qr).await?;
}
"export-keys" => {
let dir = dirs::home_dir().unwrap_or_default();
imex(&context, ImexMode::ExportSelfKeys, dir.as_ref(), None).await?;
@@ -495,7 +508,7 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
imex(&context, ImexMode::ImportSelfKeys, arg1.as_ref(), None).await?;
}
"export-setup" => {
let setup_code = create_setup_code(&context);
let setup_code = create_setup_code();
let file_name = blobdir.join("autocrypt-setup-message.html");
let file_content = render_setup_file(&context, &setup_code).await?;
fs::write(&file_name, file_content).await?;
@@ -1231,7 +1244,7 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
let socks5_enabled = context
.get_config_bool(config::Config::Socks5Enabled)
.await?;
match provider::get_provider_info(&context, arg1, socks5_enabled).await {
match provider::get_provider_info(arg1, socks5_enabled).await {
Some(info) => {
println!("Information for provider belonging to {arg1}:");
println!("status: {}", info.status as u32);

View File

@@ -22,7 +22,6 @@ use deltachat::qr_code_generator::get_securejoin_qr_svg;
use deltachat::securejoin::*;
use deltachat::stock_str::StockStrings;
use deltachat::{EventType, Events};
use log::{error, info, warn};
use rustyline::completion::{Completer, FilenameCompleter, Pair};
use rustyline::error::ReadlineError;
use rustyline::highlight::{Highlighter, MatchingBracketHighlighter};
@@ -33,6 +32,9 @@ use rustyline::{
};
use tokio::fs;
use tokio::runtime::Handle;
use tracing::{error, info, warn};
use tracing_log::LogTracer;
use tracing_subscriber::{fmt, EnvFilter};
mod cmdline;
use self::cmdline::*;
@@ -152,13 +154,15 @@ impl Completer for DcHelper {
}
}
const IMEX_COMMANDS: [&str; 12] = [
const IMEX_COMMANDS: [&str; 14] = [
"initiate-key-transfer",
"get-setupcodebegin",
"continue-key-transfer",
"has-backup",
"export-backup",
"import-backup",
"send-backup",
"receive-backup",
"export-keys",
"import-keys",
"export-setup",
@@ -359,7 +363,7 @@ async fn start(args: Vec<String>) -> Result<(), Error> {
false
}
Err(err) => {
println!("Error: {err}");
println!("Error: {err:#}");
true
}
}
@@ -374,7 +378,7 @@ async fn start(args: Vec<String>) -> Result<(), Error> {
break;
}
Err(err) => {
println!("Error: {err}");
println!("Error: {err:#}");
break;
}
}
@@ -479,7 +483,12 @@ async fn handle_cmd(
#[tokio::main]
async fn main() -> Result<(), Error> {
let _ = pretty_env_logger::try_init();
// Convert `log` records into `tracing` events.
LogTracer::init()?;
// Setup `tracing` subscriber according to `RUST_LOG` environment variable.
let filter = EnvFilter::try_from_default_env().or_else(|_| EnvFilter::try_new("info"))?;
fmt().with_env_filter(filter).with_writer(io::stderr).init();
let args = std::env::args().collect();
start(args).await?;

View File

@@ -126,7 +126,7 @@ class Chat:
"file": file,
"location": location,
"overrideSenderName": override_sender_name,
"quotedMsg": quoted_msg,
"quotedMessageId": quoted_msg,
}
msg_id = await self._rpc.send_msg(self.account.id, self.id, draft)
return Message(self.account, msg_id)

View File

@@ -31,6 +31,7 @@ class EventType(str, Enum):
SMTP_MESSAGE_SENT = "SmtpMessageSent"
IMAP_MESSAGE_DELETED = "ImapMessageDeleted"
IMAP_MESSAGE_MOVED = "ImapMessageMoved"
IMAP_INBOX_IDLE = "ImapInboxIdle"
NEW_BLOB_FILE = "NewBlobFile"
DELETED_BLOB_FILE = "DeletedBlobFile"
WARNING = "Warning"

View File

@@ -55,6 +55,11 @@ class ACFactory:
async def get_online_account(self) -> Account:
account = await self.new_configured_account()
await account.start_io()
while True:
event = await account.wait_for_event()
print(event)
if event.type == EventType.IMAP_INBOX_IDLE:
break
return account
async def get_online_accounts(self, num: int) -> List[Account]:

View File

@@ -15,6 +15,7 @@ passenv =
deps =
pytest
pytest-asyncio
pytest-timeout
aiohttp
aiodns
@@ -25,5 +26,8 @@ deps =
ruff
black
commands =
black --check --diff src/ examples/ tests/
black --quiet --check --diff src/ examples/ tests/
ruff src/ examples/ tests/
[pytest]
timeout = 60

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-rpc-server"
version = "1.111.0"
version = "1.112.0"
description = "DeltaChat JSON-RPC server"
edition = "2021"
readme = "README.md"
@@ -9,19 +9,17 @@ license = "MPL-2.0"
keywords = ["deltachat", "chat", "openpgp", "email", "encryption"]
categories = ["cryptography", "std", "email"]
[[bin]]
name = "deltachat-rpc-server"
[dependencies]
deltachat-jsonrpc = { path = "../deltachat-jsonrpc", default-features = false }
deltachat = { path = "..", default-features = false }
anyhow = "1"
env_logger = { version = "0.10.0" }
futures-lite = "1.12.0"
log = "0.4"
serde_json = "1.0.91"
serde = { version = "1.0", features = ["derive"] }
tokio = { version = "1.25.0", features = ["io-std"] }
tracing-subscriber = { version = "0.3", default-features=false, features = ["std", "env-filter", "fmt"] }
tracing = { version = "0.1" }
yerpc = { version = "0.4.0", features = ["anyhow_expose"] }
[features]

View File

@@ -1,26 +1,51 @@
use std::env;
///! Delta Chat core RPC server.
///!
///! It speaks JSON Lines over stdio.
use std::path::PathBuf;
use anyhow::Result;
use anyhow::{anyhow, Context as _, Result};
use deltachat::constants::DC_VERSION_STR;
use deltachat_jsonrpc::api::events::event_to_json_rpc_notification;
use deltachat_jsonrpc::api::{Accounts, CommandApi};
use futures_lite::stream::StreamExt;
use tokio::io::{self, AsyncBufReadExt, BufReader};
use tokio::task::JoinHandle;
use tracing::{info, trace};
use tracing_subscriber::{fmt, EnvFilter};
use yerpc::{RpcClient, RpcSession};
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
let mut args = env::args_os();
let _program_name = args.next().context("no command line arguments found")?;
if let Some(first_arg) = args.next() {
if first_arg.to_str() == Some("--version") {
if let Some(arg) = args.next() {
return Err(anyhow!("Unrecognized argument {:?}", arg));
}
eprintln!("{}", &*DC_VERSION_STR);
return Ok(());
} else {
return Err(anyhow!("Unrecognized option {:?}", first_arg));
}
}
if let Some(arg) = args.next() {
return Err(anyhow!("Unrecognized argument {:?}", arg));
}
let filter = EnvFilter::try_from_default_env().or_else(|_| EnvFilter::try_new("info"))?;
fmt()
.with_env_filter(filter)
.with_writer(std::io::stderr)
.init();
let path = std::env::var("DC_ACCOUNTS_PATH").unwrap_or_else(|_| "accounts".to_string());
log::info!("Starting with accounts directory `{}`.", path);
info!("Starting with accounts directory `{}`.", path);
let accounts = Accounts::new(PathBuf::from(&path)).await?;
let events = accounts.get_event_emitter();
log::info!("Creating JSON-RPC API.");
info!("Creating JSON-RPC API.");
let state = CommandApi::new(accounts);
let (client, mut out_receiver) = RpcClient::new();
@@ -39,7 +64,7 @@ async fn main() -> Result<()> {
let send_task: JoinHandle<anyhow::Result<()>> = tokio::spawn(async move {
while let Some(message) = out_receiver.next().await {
let message = serde_json::to_string(&message)?;
log::trace!("RPC send {}", message);
trace!("RPC send {message}");
println!("{message}");
}
Ok(())
@@ -50,13 +75,13 @@ async fn main() -> Result<()> {
let stdin = io::stdin();
let mut lines = BufReader::new(stdin).lines();
while let Some(message) = lines.next_line().await? {
log::trace!("RPC recv {}", message);
trace!("RPC recv {}", message);
let session = session.clone();
tokio::spawn(async move {
session.handle_incoming(&message).await;
});
}
log::info!("EOF reached on stdin");
info!("EOF reached on stdin");
Ok(())
});

View File

@@ -12,27 +12,41 @@ ignore = [
# becoming empty. Adding versions forces us to revisit this at least
# when upgrading.
skip = [
{ name = "windows-sys", version = "<0.45" },
{ name = "wasi", version = "<0.11" },
{ name = "version_check", version = "<0.9" },
{ name = "uuid", version = "<1.3" },
{ name = "sha2", version = "<0.10" },
{ name = "rand_core", version = "<0.6" },
{ name = "rand_chacha", version = "<0.3" },
{ name = "rand", version = "<0.8" },
{ name = "nom", version = "<7.1" },
{ name = "idna", version = "<0.3" },
{ name = "humantime", version = "<2.1" },
{ name = "hermit-abi", version = "<0.3" },
{ name = "getrandom", version = "<0.2" },
{ name = "quick-error", version = "<2.0" },
{ name = "env_logger", version = "<0.10" },
{ name = "digest", version = "<0.10" },
{ name = "darling_macro", version = "<0.14" },
{ name = "darling_core", version = "<0.14" },
{ name = "darling", version = "<0.14" },
{ name = "block-buffer", version = "<0.10" },
{ name = "base64", version = "<0.21" },
{ name = "block-buffer", version = "<0.10" },
{ name = "darling", version = "<0.14" },
{ name = "darling_core", version = "<0.14" },
{ name = "darling_macro", version = "<0.14" },
{ name = "digest", version = "<0.10" },
{ name = "env_logger", version = "<0.10" },
{ name = "getrandom", version = "<0.2" },
{ name = "hermit-abi", version = "<0.3" },
{ name = "humantime", version = "<2.1" },
{ name = "idna", version = "<0.3" },
{ name = "nom", version = "<7.1" },
{ name = "quick-error", version = "<2.0" },
{ name = "rand", version = "<0.8" },
{ name = "rand_chacha", version = "<0.3" },
{ name = "rand_core", version = "<0.6" },
{ name = "sha2", version = "<0.10" },
{ name = "time", version = "<0.3" },
{ name = "version_check", version = "<0.9" },
{ name = "wasi", version = "<0.11" },
{ name = "windows-sys", version = "<0.45" },
{ name = "windows_x86_64_msvc", version = "<0.42" },
{ name = "windows_x86_64_gnu", version = "<0.42" },
{ name = "windows_i686_msvc", version = "<0.42" },
{ name = "windows_i686_gnu", version = "<0.42" },
{ name = "windows_aarch64_msvc", version = "<0.42" },
{ name = "unicode-xid", version = "<0.2.4" },
{ name = "syn", version = "<1.0" },
{ name = "quote", version = "<1.0" },
{ name = "proc-macro2", version = "<1.0" },
{ name = "portable-atomic", version = "<1.0" },
{ name = "spin", version = "<0.9.6" },
{ name = "convert_case", version = "0.4.0" },
{ name = "clap_lex", version = "0.2.4" },
{ name = "clap", version = "3.2.23" },
]
@@ -42,11 +56,21 @@ allow = [
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"CC0-1.0",
"MIT",
"BSL-1.0", # Boost Software License 1.0
"Unicode-DFS-2016",
"CC0-1.0",
"ISC",
"MIT",
"MPL-2.0",
"OpenSSL",
"Unicode-DFS-2016",
"Zlib",
]
[[licenses.clarify]]
name = "ring"
expression = "MIT AND ISC AND OpenSSL"
license-files = [
{ path = "LICENSE", hash = 0xbd0eed23 },
]
[sources.allow-org]
@@ -54,4 +78,7 @@ allow = [
github = [
"async-email",
"deltachat",
"n0-computer",
"quinn-rs",
"dignifiedquire",
]

View File

@@ -1,3 +1,5 @@
use tempfile::tempdir;
use deltachat::chat::{self, ChatId};
use deltachat::chatlist::*;
use deltachat::config;
@@ -6,24 +8,25 @@ use deltachat::context::*;
use deltachat::message::Message;
use deltachat::stock_str::StockStrings;
use deltachat::{EventType, Events};
use tempfile::tempdir;
use tracing::{error, info, warn};
use tracing_subscriber::{fmt, EnvFilter};
fn cb(event: EventType) {
match event {
EventType::ConfigureProgress { progress, .. } => {
log::info!("progress: {}", progress);
info!("progress: {progress}");
}
EventType::Info(msg) => {
log::info!("{}", msg);
info!("{msg}");
}
EventType::Warning(msg) => {
log::warn!("{}", msg);
warn!("{msg}");
}
EventType::Error(msg) => {
log::error!("{}", msg);
error!("{msg}");
}
event => {
log::info!("{:?}", event);
info!("{event:?}");
}
}
}
@@ -31,16 +34,22 @@ fn cb(event: EventType) {
/// Run with `RUST_LOG=simple=info cargo run --release --example simple -- email pw`.
#[tokio::main]
async fn main() {
pretty_env_logger::try_init_timed().ok();
let filter = EnvFilter::try_from_default_env()
.or_else(|_| EnvFilter::try_new("info"))
.unwrap();
fmt()
.with_env_filter(filter)
.with_writer(std::io::stderr)
.init();
let dir = tempdir().unwrap();
let dbfile = dir.path().join("db.sqlite");
log::info!("creating database {:?}", dbfile);
info!("creating database {:?}", dbfile);
let ctx = Context::new(&dbfile, 0, Events::new(), StockStrings::new())
.await
.expect("Failed to create context");
let info = ctx.get_info().await;
log::info!("info: {:#?}", info);
info!("info: {:#?}", info);
let events = ctx.get_event_emitter();
let events_spawn = tokio::task::spawn(async move {
@@ -49,7 +58,7 @@ async fn main() {
}
});
log::info!("configuring");
info!("configuring");
let args = std::env::args().collect::<Vec<String>>();
assert_eq!(args.len(), 3, "requires email password");
let email = args[1].clone();
@@ -63,9 +72,9 @@ async fn main() {
ctx.configure().await.unwrap();
log::info!("------ RUN ------");
info!("------ RUN ------");
ctx.start_io().await;
log::info!("--- SENDING A MESSAGE ---");
info!("--- SENDING A MESSAGE ---");
let contact_id = Contact::create(&ctx, "dignifiedquire", "dignifiedquire@gmail.com")
.await
@@ -73,7 +82,7 @@ async fn main() {
let chat_id = ChatId::create_for_contact(&ctx, contact_id).await.unwrap();
for i in 0..1 {
log::info!("sending message {}", i);
info!("sending message {}", i);
chat::send_text_msg(&ctx, chat_id, format!("Hi, here is my {i}nth message!"))
.await
.unwrap();
@@ -82,19 +91,19 @@ async fn main() {
// wait for the message to be sent out
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
log::info!("fetching chats..");
info!("fetching chats..");
let chats = Chatlist::try_load(&ctx, 0, None, None).await.unwrap();
for i in 0..chats.len() {
let msg = Message::load_from_db(&ctx, chats.get_msg_id(i).unwrap().unwrap())
.await
.unwrap();
log::info!("[{}] msg: {:?}", i, msg);
info!("[{i}] msg: {msg:?}");
}
log::info!("stopping");
info!("stopping");
ctx.stop_io().await;
log::info!("closing");
info!("closing");
drop(ctx);
events_spawn.await.unwrap();
}

1365
fuzz/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -37,6 +37,7 @@ module.exports = {
DC_EVENT_ERROR: 400,
DC_EVENT_ERROR_SELF_NOT_IN_GROUP: 410,
DC_EVENT_IMAP_CONNECTED: 102,
DC_EVENT_IMAP_INBOX_IDLE: 106,
DC_EVENT_IMAP_MESSAGE_DELETED: 104,
DC_EVENT_IMAP_MESSAGE_MOVED: 105,
DC_EVENT_IMEX_FILE_WRITTEN: 2052,
@@ -112,6 +113,7 @@ module.exports = {
DC_QR_ADDR: 320,
DC_QR_ASK_VERIFYCONTACT: 200,
DC_QR_ASK_VERIFYGROUP: 202,
DC_QR_BACKUP: 251,
DC_QR_ERROR: 400,
DC_QR_FPR_MISMATCH: 220,
DC_QR_FPR_OK: 210,
@@ -149,6 +151,7 @@ module.exports = {
DC_STR_AEAP_EXPLANATION_AND_LINK: 123,
DC_STR_ARCHIVEDCHATS: 40,
DC_STR_AUDIO: 11,
DC_STR_BACKUP_TRANSFER_QR: 162,
DC_STR_BAD_TIME_MSG_BODY: 85,
DC_STR_BROADCAST_LIST: 115,
DC_STR_CANNOT_LOGIN: 60,

View File

@@ -8,6 +8,7 @@ module.exports = {
103: 'DC_EVENT_SMTP_MESSAGE_SENT',
104: 'DC_EVENT_IMAP_MESSAGE_DELETED',
105: 'DC_EVENT_IMAP_MESSAGE_MOVED',
106: 'DC_EVENT_IMAP_INBOX_IDLE',
150: 'DC_EVENT_NEW_BLOB_FILE',
151: 'DC_EVENT_DELETED_BLOB_FILE',
300: 'DC_EVENT_WARNING',

View File

@@ -37,6 +37,7 @@ export enum C {
DC_EVENT_ERROR = 400,
DC_EVENT_ERROR_SELF_NOT_IN_GROUP = 410,
DC_EVENT_IMAP_CONNECTED = 102,
DC_EVENT_IMAP_INBOX_IDLE = 106,
DC_EVENT_IMAP_MESSAGE_DELETED = 104,
DC_EVENT_IMAP_MESSAGE_MOVED = 105,
DC_EVENT_IMEX_FILE_WRITTEN = 2052,
@@ -112,6 +113,7 @@ export enum C {
DC_QR_ADDR = 320,
DC_QR_ASK_VERIFYCONTACT = 200,
DC_QR_ASK_VERIFYGROUP = 202,
DC_QR_BACKUP = 251,
DC_QR_ERROR = 400,
DC_QR_FPR_MISMATCH = 220,
DC_QR_FPR_OK = 210,
@@ -149,6 +151,7 @@ export enum C {
DC_STR_AEAP_EXPLANATION_AND_LINK = 123,
DC_STR_ARCHIVEDCHATS = 40,
DC_STR_AUDIO = 11,
DC_STR_BACKUP_TRANSFER_QR = 162,
DC_STR_BAD_TIME_MSG_BODY = 85,
DC_STR_BROADCAST_LIST = 115,
DC_STR_CANNOT_LOGIN = 60,
@@ -289,6 +292,7 @@ export const EventId2EventName: { [key: number]: string } = {
103: 'DC_EVENT_SMTP_MESSAGE_SENT',
104: 'DC_EVENT_IMAP_MESSAGE_DELETED',
105: 'DC_EVENT_IMAP_MESSAGE_MOVED',
106: 'DC_EVENT_IMAP_INBOX_IDLE',
150: 'DC_EVENT_NEW_BLOB_FILE',
151: 'DC_EVENT_DELETED_BLOB_FILE',
300: 'DC_EVENT_WARNING',

View File

@@ -60,5 +60,5 @@
"test:mocha": "mocha -r esm node/test/test.js --growl --reporter=spec --bail --exit"
},
"types": "node/dist/index.d.ts",
"version": "1.111.0"
"version": "1.112.0"
}

View File

@@ -95,6 +95,7 @@ class Account:
ptr,
lib.dc_context_unref,
)
self._id = lib.dc_get_id(self._dc_context)
self._shutdown_event = Event()
self._event_thread = EventThread(self)
@@ -139,6 +140,10 @@ class Account:
"""return dictionary of built config parameters."""
return get_dc_info_as_dict(self._dc_context)
def get_id(self) -> int:
"""Return account ID."""
return self._id
def dump_account_info(self, logfile):
def log(*args, **kwargs):
kwargs["file"] = logfile

View File

@@ -190,7 +190,7 @@ class FFIEventTracker:
- ac2 is still running FetchExsistingMsgs job and thinks it's an existing, old message
- therefore no DC_EVENT_INCOMING_MSG is sent
"""
self.get_info_contains("INBOX: Idle entering")
self.get_matching("DC_EVENT_IMAP_INBOX_IDLE")
def wait_next_incoming_message(self):
"""wait for and return next incoming message."""
@@ -263,6 +263,7 @@ class EventThread(threading.Thread):
self._process_event(event)
def _process_event(self, event) -> None:
account_id = lib.dc_event_get_account_id(event)
evt = lib.dc_event_get_id(event)
data1 = lib.dc_event_get_data1_int(event)
# the following code relates to the deltachat/_build.py's helper
@@ -272,6 +273,7 @@ class EventThread(threading.Thread):
data2 = from_optional_dc_charpointer(lib.dc_event_get_data2_str(event))
else:
data2 = lib.dc_event_get_data2_int(event)
assert account_id == self.account.get_id(), f"data2={data2}"
lib.dc_event_unref(event)
ffi_event = FFIEvent(name=evt_name, data1=data1, data2=data2)

View File

@@ -1,19 +0,0 @@
import os
import subprocess
import sys
if __name__ == "__main__":
assert len(sys.argv) == 2
workspacedir = sys.argv[1]
for relpath in os.listdir(workspacedir):
if relpath.startswith("deltachat"):
p = os.path.join(workspacedir, relpath)
subprocess.check_call(
[
"auditwheel",
"repair",
p,
"-w",
workspacedir,
],
)

View File

@@ -220,16 +220,16 @@ def test_fetch_existing(acfactory, lp, mvbox_move):
acfactory.bring_accounts_online()
assert_folders_configured(ac1)
assert ac1.direct_imap.select_config_folder("mvbox" if mvbox_move else "inbox")
with ac1.direct_imap.idle() as idle1:
lp.sec("send out message with bcc to ourselves")
ac1.set_config("bcc_self", "1")
chat = acfactory.get_accepted_chat(ac1, ac2)
chat.send_text("message text")
assert_folders_configured(ac1)
lp.sec("send out message with bcc to ourselves")
ac1.set_config("bcc_self", "1")
chat = acfactory.get_accepted_chat(ac1, ac2)
chat.send_text("message text")
lp.sec("wait until the bcc_self message arrives in correct folder and is marked seen")
assert idle1.wait_for_seen()
lp.sec("wait until the bcc_self message arrives in correct folder and is marked seen")
if mvbox_move:
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder DeltaChat as seen.")
else:
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
assert_folders_configured(ac1)
lp.sec("create a cloned ac1 and fetch contact history during configure")
@@ -271,12 +271,12 @@ def test_fetch_existing_msgs_group_and_single(acfactory, lp):
ac1._evtracker.wait_next_incoming_message()
lp.sec("send out message with bcc to ourselves")
with ac1.direct_imap.idle() as idle1:
ac1.set_config("bcc_self", "1")
ac1_ac2_chat = ac1.create_chat(ac2)
ac1_ac2_chat.send_text("outgoing, encrypted direct message, creating a chat")
# wait until the bcc_self message arrives
assert idle1.wait_for_seen()
ac1.set_config("bcc_self", "1")
ac1_ac2_chat = ac1.create_chat(ac2)
ac1_ac2_chat.send_text("outgoing, encrypted direct message, creating a chat")
# wait until the bcc_self message arrives
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
lp.sec("Clone online account and let it fetch the existing messages")
ac1_clone = acfactory.new_online_configuring_account(cloned_from=ac1)

View File

@@ -134,22 +134,27 @@ def test_one_account_send_bcc_setting(acfactory, lp):
ac1.set_config("bcc_self", "1")
lp.sec("send out message with bcc to ourselves")
with ac1.direct_imap.idle() as idle1:
msg_out = chat.send_text("message2")
msg_out = chat.send_text("message2")
# wait for send out (BCC)
ev = ac1._evtracker.get_matching("DC_EVENT_SMTP_MESSAGE_SENT")
assert ac1.get_config("bcc_self") == "1"
# wait for send out (BCC)
ev = ac1._evtracker.get_matching("DC_EVENT_SMTP_MESSAGE_SENT")
assert ac1.get_config("bcc_self") == "1"
# now make sure we are sending message to ourselves too
assert self_addr in ev.data2
assert other_addr in ev.data2
assert idle1.wait_for_seen()
# Second client receives only second message, but not the first
# Second client receives only second message, but not the first.
ev_msg = ac1_clone._evtracker.wait_next_messages_changed()
assert ev_msg.text == msg_out.text
# now make sure we are sending message to ourselves too
assert self_addr in ev.data2
assert other_addr in ev.data2
# BCC-self messages are marked as seen by the sender device.
ac1._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
# Check that the message is marked as seen on IMAP.
ac1.direct_imap.select_folder("Inbox")
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_send_file_twice_unicode_filename_mangling(tmpdir, acfactory, lp):
ac1, ac2 = acfactory.get_online_accounts(2)
@@ -513,22 +518,22 @@ def test_send_and_receive_message_markseen(acfactory, lp):
msg4 = ac2._evtracker.wait_next_incoming_message()
lp.sec("mark messages as seen on ac2, wait for changes on ac1")
with ac1.direct_imap.idle() as idle1:
with ac2.direct_imap.idle() as idle2:
ac2.mark_seen_messages([msg2, msg4])
ev = ac2._evtracker.get_matching("DC_EVENT_MSGS_NOTICED")
assert msg2.chat.id == msg4.chat.id
assert ev.data1 == msg2.chat.id
assert ev.data2 == 0
idle2.wait_for_seen()
ac2.mark_seen_messages([msg2, msg4])
ev = ac2._evtracker.get_matching("DC_EVENT_MSGS_NOTICED")
assert msg2.chat.id == msg4.chat.id
assert ev.data1 == msg2.chat.id
assert ev.data2 == 0
ac2._evtracker.get_info_contains("Marked messages .* in folder INBOX as seen.")
lp.step("1")
for _i in range(2):
ev = ac1._evtracker.get_matching("DC_EVENT_MSG_READ")
assert ev.data1 > const.DC_CHAT_ID_LAST_SPECIAL
assert ev.data2 > const.DC_MSG_ID_LAST_SPECIAL
lp.step("2")
idle1.wait_for_seen() # Check that ac1 marks the read receipt as read
lp.step("1")
for _i in range(2):
ev = ac1._evtracker.get_matching("DC_EVENT_MSG_READ")
assert ev.data1 > const.DC_CHAT_ID_LAST_SPECIAL
assert ev.data2 > const.DC_MSG_ID_LAST_SPECIAL
lp.step("2")
# Check that ac1 marks the read receipt as read.
ac1._evtracker.get_info_contains("Marked messages .* in folder INBOX as seen.")
assert msg1.is_out_mdn_received()
assert msg3.is_out_mdn_received()
@@ -613,18 +618,24 @@ def test_markseen_message_and_mdn(acfactory, mvbox_move):
# Do not send BCC to self, we only want to test MDN on ac1.
ac1.set_config("bcc_self", "0")
acfactory.get_accepted_chat(ac1, ac2).send_text("hi")
msg = ac2._evtracker.wait_next_incoming_message()
ac2.mark_seen_messages([msg])
folder = "mvbox" if mvbox_move else "inbox"
for ac in [ac1, ac2]:
if mvbox_move:
ac._evtracker.get_info_contains("Marked messages [0-9]+ in folder DeltaChat as seen.")
else:
ac._evtracker.get_info_contains("Marked messages [0-9]+ in folder INBOX as seen.")
ac1.direct_imap.select_config_folder(folder)
ac2.direct_imap.select_config_folder(folder)
with ac1.direct_imap.idle() as idle1:
with ac2.direct_imap.idle() as idle2:
acfactory.get_accepted_chat(ac1, ac2).send_text("hi")
msg = ac2._evtracker.wait_next_incoming_message()
ac2.mark_seen_messages([msg])
idle2.wait_for_seen() # Check original message is marked as seen
idle1.wait_for_seen() # Check that the mdn is marked as seen
# Check that the mdn is marked as seen
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
# Check original message is marked as seen
assert len(list(ac2.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_reply_privately(acfactory):
@@ -678,23 +689,24 @@ def test_mdn_asymmetric(acfactory, lp):
assert len(msg.chat.get_messages()) == 1
ac1.direct_imap.select_config_folder("mvbox")
with ac1.direct_imap.idle() as idle1:
lp.sec("ac2: mark incoming message as seen")
ac2.mark_seen_messages([msg])
lp.sec("ac2: mark incoming message as seen")
ac2.mark_seen_messages([msg])
lp.sec("ac1: waiting for incoming activity")
# MDN should be moved even though MDNs are already disabled
ac1._evtracker.get_matching("DC_EVENT_IMAP_MESSAGE_MOVED")
lp.sec("ac1: waiting for incoming activity")
# MDN should be moved even though MDNs are already disabled
ac1._evtracker.get_matching("DC_EVENT_IMAP_MESSAGE_MOVED")
assert len(chat.get_messages()) == 1
assert len(chat.get_messages()) == 1
# Wait for the message to be marked as seen on IMAP.
assert idle1.wait_for_seen()
# Wait for the message to be marked as seen on IMAP.
ac1._evtracker.get_info_contains("Marked messages 1 in folder DeltaChat as seen.")
# MDN is received even though MDNs are already disabled
assert msg_out.is_out_mdn_received()
ac1.direct_imap.select_config_folder("mvbox")
assert len(list(ac1.direct_imap.conn.fetch(AND(seen=True)))) == 1
def test_send_and_receive_will_encrypt_decrypt(acfactory, lp):
ac1, ac2 = acfactory.get_online_accounts(2)

View File

@@ -33,18 +33,6 @@ passenv =
CARGO_TARGET_DIR
RUSTC_WRAPPER
[testenv:auditwheels]
skipsdist = True
deps = auditwheel
passenv =
DCC_RS_DEV
DCC_RS_TARGET
AUDITWHEEL_ARCH
AUDITWHEEL_PLAT
AUDITWHEEL_POLICY
commands =
python tests/auditwheels.py {toxworkdir}/wheelhouse
[testenv:lint]
skipsdist = True
skip_install = True
@@ -55,7 +43,7 @@ deps =
pygments
restructuredtext_lint
commands =
black --check --diff setup.py install_python_bindings.py src/deltachat examples/ tests/
black --quiet --check --diff setup.py install_python_bindings.py src/deltachat examples/ tests/
ruff src/deltachat tests/ examples/
rst-lint --encoding 'utf-8' README.rst

View File

@@ -1,43 +0,0 @@
#!/bin/sh
# Build deltachat-rpc-server for Android.
set -e
test -n "$ANDROID_NDK_ROOT" || exit 1
RUSTUP_TOOLCHAIN="1.64.0"
rustup install "$RUSTUP_TOOLCHAIN"
rustup target add armv7-linux-androideabi aarch64-linux-android i686-linux-android x86_64-linux-android --toolchain "$RUSTUP_TOOLCHAIN"
KERNEL="$(uname -s | tr '[:upper:]' '[:lower:]')"
ARCH="$(uname -m)"
NDK_HOST_TAG="$KERNEL-$ARCH"
TOOLCHAIN="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/$NDK_HOST_TAG"
PACKAGE="deltachat-rpc-server"
export CARGO_PROFILE_RELEASE_LTO=on
CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="$TOOLCHAIN/bin/armv7a-linux-androideabi16-clang" \
TARGET_CC="$TOOLCHAIN/bin/armv7a-linux-androideabi16-clang" \
TARGET_AR="$TOOLCHAIN/bin/llvm-ar" \
TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" \
cargo "+$RUSTUP_TOOLCHAIN" rustc --release --target armv7-linux-androideabi -p $PACKAGE
CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/aarch64-linux-android21-clang" \
TARGET_CC="$TOOLCHAIN/bin/aarch64-linux-android21-clang" \
TARGET_AR="$TOOLCHAIN/bin/llvm-ar" \
TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" \
cargo "+$RUSTUP_TOOLCHAIN" rustc --release --target aarch64-linux-android -p $PACKAGE
CARGO_TARGET_I686_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/i686-linux-android16-clang" \
TARGET_CC="$TOOLCHAIN/bin/i686-linux-android16-clang" \
TARGET_AR="$TOOLCHAIN/bin/llvm-ar" \
TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" \
cargo "+$RUSTUP_TOOLCHAIN" rustc --release --target i686-linux-android -p $PACKAGE
CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/x86_64-linux-android21-clang" \
TARGET_CC="$TOOLCHAIN/bin/x86_64-linux-android21-clang" \
TARGET_AR="$TOOLCHAIN/bin/llvm-ar" \
TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" \
cargo "+$RUSTUP_TOOLCHAIN" rustc --release --target x86_64-linux-android -p $PACKAGE

View File

@@ -7,7 +7,7 @@ set -euo pipefail
#
# Avoid using rustup here as it depends on reading /proc/self/exe and
# has problems running under QEMU.
RUST_VERSION=1.64.0
RUST_VERSION=1.68.0
ARCH="$(uname -m)"
test -f "/lib/libc.musl-$ARCH.so.1" && LIBC=musl || LIBC=gnu

View File

@@ -220,13 +220,13 @@ if __name__ == "__main__":
process_dir(Path(sys.argv[1]))
out_all += "pub(crate) static PROVIDER_DATA: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| [\n"
out_all += "pub(crate) static PROVIDER_DATA: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| HashMap::from([\n"
out_all += out_domains
out_all += "].iter().copied().collect());\n\n"
out_all += "]));\n\n"
out_all += "pub(crate) static PROVIDER_IDS: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| [\n"
out_all += "pub(crate) static PROVIDER_IDS: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| HashMap::from([\n"
out_all += out_ids
out_all += "].iter().copied().collect());\n\n"
out_all += "]));\n\n"
if len(sys.argv) < 3:
now = datetime.datetime.utcnow()

View File

@@ -31,7 +31,9 @@ unset DCC_NEW_TMP_EMAIL
# Try to build wheels for a range of interpreters, but don't fail if they are not available.
# E.g. musllinux_1_1 does not have PyPy interpreters as of 2022-07-10
tox --workdir "$TOXWORKDIR" -e py37,py38,py39,py310,py311,pypy37,pypy38,pypy39,auditwheels --skip-missing-interpreters true
tox --workdir "$TOXWORKDIR" -e py37,py38,py39,py310,py311,pypy37,pypy38,pypy39 --skip-missing-interpreters true
auditwheel repair "$TOXWORKDIR/wheelhouse/deltachat*" -w "$TOXWORKDIR/wheelhouse"
echo -----------------------

View File

@@ -93,7 +93,7 @@ def main():
if "alpha" not in newversion:
for line in open("CHANGELOG.md"):
## 1.25.0
if line.startswith("## ") and line[2:].strip().startswith(newversion):
if line.startswith("## [") and line[4:].strip().startswith(newversion):
break
else:
raise SystemExit(

View File

@@ -7,17 +7,17 @@ set -e
unset RUSTFLAGS
ZIG_VERSION=0.11.0-dev.1935+1d96a17af
ZIG_VERSION=0.11.0-dev.2213+515e1c93e
# Download Zig
rm -fr "$ZIG_VERSION" "ZIG_VERSION.tar.xz"
rm -fr "$ZIG_VERSION" "zig-linux-x86_64-$ZIG_VERSION.tar.xz"
wget "https://ziglang.org/builds/zig-linux-x86_64-$ZIG_VERSION.tar.xz"
tar xf "zig-linux-x86_64-$ZIG_VERSION.tar.xz"
export PATH="$PWD/zig-linux-x86_64-$ZIG_VERSION:$PATH"
cargo install cargo-zigbuild
for TARGET in aarch64-unknown-linux-musl armv7-unknown-linux-musleabihf; do
for TARGET in x86_64-unknown-linux-musl aarch64-unknown-linux-musl armv7-unknown-linux-musleabihf; do
rustup target add "$TARGET"
cargo zigbuild --release --target "$TARGET" -p deltachat-rpc-server --features vendored
done

View File

@@ -262,7 +262,7 @@ impl Accounts {
pub async fn stop_io(&self) {
// Sending an event here wakes up event loop even
// if there are no accounts.
info!(self, "Stopping IO for all accounts");
self.emit_event(EventType::Info("Stopping IO for all accounts.".to_string()));
for account in self.accounts.values() {
account.stop_io().await;
}
@@ -271,14 +271,14 @@ impl Accounts {
/// Notifies all accounts that the network may have become available.
pub async fn maybe_network(&self) {
for account in self.accounts.values() {
account.maybe_network().await;
account.scheduler.maybe_network().await;
}
}
/// Notifies all accounts that the network connection may have been lost.
pub async fn maybe_network_lost(&self) {
for account in self.accounts.values() {
account.maybe_network_lost().await;
account.scheduler.maybe_network_lost(account).await;
}
}
@@ -291,6 +291,11 @@ impl Accounts {
pub fn get_event_emitter(&self) -> EventEmitter {
self.events.get_emitter()
}
/// Returns event channel.
pub fn events(&self) -> Events {
self.events.clone()
}
}
/// Configuration file name.

View File

@@ -4,13 +4,17 @@ use core::cmp::max;
use std::ffi::OsStr;
use std::fmt;
use std::io::Cursor;
use std::iter::FusedIterator;
use std::path::{Path, PathBuf};
use anyhow::{format_err, Context as _, Result};
use futures::StreamExt;
use image::{DynamicImage, ImageFormat};
use num_traits::FromPrimitive;
use tokio::io::AsyncWriteExt;
use tokio::{fs, io};
use tokio_stream::wrappers::ReadDirStream;
use tracing::{error, info, warn};
use crate::config::Config;
use crate::constants::{
@@ -160,9 +164,9 @@ impl<'a> BlobObject<'a> {
pub fn from_path(context: &'a Context, path: &Path) -> Result<BlobObject<'a>> {
let rel_path = path
.strip_prefix(context.get_blobdir())
.context("wrong blobdir")?;
.with_context(|| format!("wrong blobdir: {}", path.display()))?;
if !BlobObject::is_acceptible_blob_name(rel_path) {
return Err(format_err!("wrong name"));
return Err(format_err!("bad blob name: {}", rel_path.display()));
}
let name = rel_path.to_str().context("wrong name")?;
BlobObject::from_name(context, name.to_string())
@@ -326,7 +330,7 @@ impl<'a> BlobObject<'a> {
// max_bytes is 20_000 bytes: Outlook servers don't allow headers larger than 32k.
// 32 / 4 * 3 = 24k if you account for base64 encoding. To be safe, we reduced this to 20k.
if let Some(new_name) = self.recode_to_size(context, blob_abs, img_wh, Some(20_000))? {
if let Some(new_name) = self.recode_to_size(blob_abs, img_wh, Some(20_000))? {
self.name = new_name;
}
Ok(())
@@ -348,10 +352,7 @@ impl<'a> BlobObject<'a> {
MediaQuality::Worse => WORSE_IMAGE_SIZE,
};
if self
.recode_to_size(context, blob_abs, img_wh, None)?
.is_some()
{
if self.recode_to_size(blob_abs, img_wh, None)?.is_some() {
return Err(format_err!(
"Internal error: recode_to_size(..., None) shouldn't change the name of the image"
));
@@ -361,21 +362,20 @@ impl<'a> BlobObject<'a> {
fn recode_to_size(
&self,
context: &Context,
mut blob_abs: PathBuf,
mut img_wh: u32,
max_bytes: Option<usize>,
) -> Result<Option<String>> {
tokio::task::block_in_place(move || {
let mut img = image::open(&blob_abs).context("image recode failure")?;
let orientation = self.get_exif_orientation(context);
let orientation = self.get_exif_orientation();
let mut encoded = Vec::new();
let mut changed_name = None;
let exceeds_width = img.width() > img_wh || img.height() > img_wh;
let do_scale =
exceeds_width || encoded_img_exceeds_bytes(context, &img, max_bytes, &mut encoded)?;
exceeds_width || encoded_img_exceeds_bytes(&img, max_bytes, &mut encoded)?;
let do_rotate = matches!(orientation, Ok(90) | Ok(180) | Ok(270));
if do_scale || do_rotate {
@@ -398,7 +398,7 @@ impl<'a> BlobObject<'a> {
loop {
let new_img = img.thumbnail(img_wh, img_wh);
if encoded_img_exceeds_bytes(context, &new_img, max_bytes, &mut encoded)? {
if encoded_img_exceeds_bytes(&new_img, max_bytes, &mut encoded)? {
if img_wh < 20 {
return Err(format_err!(
"Failed to scale image to below {}B",
@@ -413,8 +413,7 @@ impl<'a> BlobObject<'a> {
}
info!(
context,
"Final scaled-down image size: {}B ({}px)",
"Final scaled-down image size: {}B ({}px).",
encoded.len(),
img_wh
);
@@ -443,7 +442,7 @@ impl<'a> BlobObject<'a> {
})
}
pub fn get_exif_orientation(&self, context: &Context) -> Result<i32> {
pub fn get_exif_orientation(&self) -> Result<i32> {
let file = std::fs::File::open(self.to_abs_path())?;
let mut bufreader = std::io::BufReader::new(&file);
let exifreader = exif::Reader::new();
@@ -455,7 +454,7 @@ impl<'a> BlobObject<'a> {
Some(3) => return Ok(180),
Some(6) => return Ok(90),
Some(8) => return Ok(270),
other => warn!(context, "exif orientation value ignored: {:?}", other),
other => warn!("Exif orientation value ignored: {other:?}."),
}
}
Ok(0)
@@ -468,6 +467,86 @@ impl<'a> fmt::Display for BlobObject<'a> {
}
}
/// All files in the blobdir.
///
/// This exists so we can have a [`BlobDirIter`] which needs something to own the data of
/// it's `&Path`. Use [`BlobDirContents::iter`] to create the iterator.
///
/// Additionally pre-allocating this means we get a length for progress report.
pub(crate) struct BlobDirContents<'a> {
inner: Vec<PathBuf>,
context: &'a Context,
}
impl<'a> BlobDirContents<'a> {
pub(crate) async fn new(context: &'a Context) -> Result<BlobDirContents<'a>> {
let readdir = fs::read_dir(context.get_blobdir()).await?;
let inner = ReadDirStream::new(readdir)
.filter_map(|entry| async move {
match entry {
Ok(entry) => Some(entry),
Err(err) => {
error!("Failed to read blob file: {err:#}.");
None
}
}
})
.filter_map(|entry| async move {
match entry.file_type().await.ok()?.is_file() {
true => Some(entry.path()),
false => {
warn!(
"Export: Found blob dir entry {} that is not a file, ignoring.",
entry.path().display()
);
None
}
}
})
.collect()
.await;
Ok(Self { inner, context })
}
pub(crate) fn iter(&self) -> BlobDirIter<'_> {
BlobDirIter::new(self.context, self.inner.iter())
}
pub(crate) fn len(&self) -> usize {
self.inner.len()
}
}
/// A iterator over all the [`BlobObject`]s in the blobdir.
pub(crate) struct BlobDirIter<'a> {
iter: std::slice::Iter<'a, PathBuf>,
context: &'a Context,
}
impl<'a> BlobDirIter<'a> {
fn new(context: &'a Context, iter: std::slice::Iter<'a, PathBuf>) -> BlobDirIter<'a> {
Self { iter, context }
}
}
impl<'a> Iterator for BlobDirIter<'a> {
type Item = BlobObject<'a>;
fn next(&mut self) -> Option<Self::Item> {
for path in self.iter.by_ref() {
// In theory this can error but we'd have corrupted filenames in the blobdir, so
// silently skipping them is fine.
match BlobObject::from_path(self.context, path) {
Ok(blob) => return Some(blob),
Err(err) => warn!("{err:#}"),
}
}
None
}
}
impl FusedIterator for BlobDirIter<'_> {}
fn encode_img(img: &DynamicImage, encoded: &mut Vec<u8>) -> anyhow::Result<()> {
encoded.clear();
let mut buf = Cursor::new(encoded);
@@ -475,7 +554,6 @@ fn encode_img(img: &DynamicImage, encoded: &mut Vec<u8>) -> anyhow::Result<()> {
Ok(())
}
fn encoded_img_exceeds_bytes(
context: &Context,
img: &DynamicImage,
max_bytes: Option<usize>,
encoded: &mut Vec<u8>,
@@ -484,8 +562,7 @@ fn encoded_img_exceeds_bytes(
encode_img(img, encoded)?;
if encoded.len() > max_bytes {
info!(
context,
"image size {}B ({}x{}px) exceeds {}B, need to scale down",
"Image size {}B ({}x{}px) exceeds {}B, need to scale down.",
encoded.len(),
img.width(),
img.height(),
@@ -739,7 +816,7 @@ mod tests {
let blob = BlobObject::new_from_path(&t, &avatar_blob).await.unwrap();
blob.recode_to_size(&t, blob.to_abs_path(), 1000, Some(3000))
blob.recode_to_size(blob.to_abs_path(), 1000, Some(3000))
.unwrap();
assert!(file_size(&avatar_blob).await <= 3000);
assert!(file_size(&avatar_blob).await > 2000);
@@ -923,7 +1000,7 @@ mod tests {
check_image_size(&file, original_width, original_height);
let blob = BlobObject::new_from_path(&alice, &file).await?;
assert_eq!(blob.get_exif_orientation(&alice).unwrap_or(0), orientation);
assert_eq!(blob.get_exif_orientation().unwrap_or(0), orientation);
let mut msg = Message::new(Viewtype::Image);
msg.set_file(file.to_str().unwrap(), None);
@@ -944,7 +1021,7 @@ mod tests {
let file = bob_msg.get_file(&bob).unwrap();
let blob = BlobObject::new_from_path(&bob, &file).await?;
assert_eq!(blob.get_exif_orientation(&bob).unwrap_or(0), 0);
assert_eq!(blob.get_exif_orientation().unwrap_or(0), 0);
let img = check_image_size(file, compressed_width, compressed_height);
Ok(img)

View File

@@ -10,6 +10,7 @@ use std::time::{Duration, SystemTime};
use anyhow::{bail, ensure, Context as _, Result};
use deltachat_derive::{FromSql, ToSql};
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};
use crate::aheader::EncryptPreference;
use crate::blob::BlobObject;
@@ -245,10 +246,7 @@ impl ChatId {
Contact::scaleup_origin_by_id(context, contact_id, Origin::CreateChat).await?;
chat_id
} else {
warn!(
context,
"Cannot create chat, contact {} does not exist.", contact_id,
);
warn!("Cannot create chat, contact {contact_id} does not exist.");
bail!("Can not create chat for non-existing contact");
}
}
@@ -284,12 +282,7 @@ impl ChatId {
let chat_id = ChatId::new(u32::try_from(row_id)?);
info!(
context,
"Created group/mailinglist '{}' grpid={} as {}, blocked={}",
grpname,
grpid,
chat_id,
create_blocked,
"Created group/mailinglist '{grpname}' grpid={grpid} as {chat_id}, blocked={create_blocked}."
);
Ok(chat_id)
@@ -336,16 +329,13 @@ impl ChatId {
Chattype::Single => {
for contact_id in get_chat_contacts(context, self).await? {
if contact_id != ContactId::SELF {
info!(
context,
"Blocking the contact {} to block 1:1 chat", contact_id
);
info!("Blocking the contact {contact_id} to block 1:1 chat.");
Contact::block(context, contact_id).await?;
}
}
}
Chattype::Group => {
info!(context, "Can't block groups yet, deleting the chat");
info!("Can't block groups yet, deleting the chat.");
self.delete(context).await?;
}
Chattype::Mailinglist => {
@@ -410,7 +400,7 @@ impl ChatId {
let chat = Chat::load_from_db(context, self).await?;
if protect == chat.protected {
info!(context, "Protection status unchanged for {}.", self);
info!("Protection status unchanged for {self}.");
return Ok(());
}
@@ -500,7 +490,7 @@ impl ChatId {
let chat = Chat::load_from_db(context, self).await?;
if let Err(e) = self.inner_set_protection(context, protect).await {
error!(context, "Cannot set protection: {}", e); // make error user-visible
error!("Cannot set protection: {e:#}."); // make error user-visible
return Err(e);
}
@@ -639,7 +629,10 @@ impl ChatId {
context.emit_msgs_changed_without_ids();
context.set_config(Config::LastHousekeeping, None).await?;
context.interrupt_inbox(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
if chat.is_self_talk() {
let mut msg = Message::new(Viewtype::Text);
@@ -1065,10 +1058,7 @@ impl ChatId {
!self.is_special(),
"can not set gossiped timestamp for special chats"
);
info!(
context,
"set gossiped_timestamp for chat {} to {}.", self, timestamp,
);
info!("Set gossiped_timestamp for chat {self} to {timestamp}.");
context
.sql
@@ -1206,10 +1196,7 @@ impl Chat {
}
}
Err(err) => {
error!(
context,
"failed to load contacts for {}: {:#}", chat.id, err
);
error!("Failed to load contacts for {}: {:#}.", chat.id, err);
}
}
chat.name = chat_name;
@@ -1472,10 +1459,7 @@ impl Chat {
{
to_id = id;
} else {
error!(
context,
"Cannot send message, contact for {} not found.", self.id,
);
error!("Cannot send message, contact for {} not found.", self.id,);
bail!("Cannot set message, contact for {} not found.", self.id);
}
} else if self.typ == Chattype::Group
@@ -1667,7 +1651,7 @@ impl Chat {
maybe_set_logging_xdc(context, msg, self.id).await?;
}
context.interrupt_ephemeral_task().await;
context.scheduler.interrupt_ephemeral_task().await;
Ok(msg.id)
}
}
@@ -2029,8 +2013,8 @@ async fn prepare_msg_blob(context: &Context, msg: &mut Message) -> Result<()> {
.with_context(|| format!("attachment missing for message of type #{}", msg.viewtype))?;
if msg.viewtype == Viewtype::Image {
if let Err(e) = blob.recode_to_image_size(context).await {
warn!(context, "Cannot recode image, using original data: {:?}", e);
if let Err(err) = blob.recode_to_image_size(context).await {
warn!("Cannot recode image, using original data: {err:#}.");
}
}
msg.param.set(Param::File, blob.as_name());
@@ -2070,7 +2054,6 @@ async fn prepare_msg_blob(context: &Context, msg: &mut Message) -> Result<()> {
}
info!(
context,
"Attaching \"{}\" for message type #{}.",
blob.to_abs_path().display(),
msg.viewtype
@@ -2201,7 +2184,10 @@ async fn send_msg_inner(context: &Context, chat_id: ChatId, msg: &mut Message) -
context.emit_event(EventType::LocationChanged(Some(ContactId::SELF)));
}
context.interrupt_smtp(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_smtp(InterruptInfo::new(false))
.await;
}
Ok(msg.id)
@@ -2250,7 +2236,7 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
let attach_selfavatar = match shall_attach_selfavatar(context, msg.chat_id).await {
Ok(attach_selfavatar) => attach_selfavatar,
Err(err) => {
warn!(context, "job: cannot get selfavatar-state: {:#}", err);
warn!("SMTP job cannot get selfavatar-state: {err:#}.");
false
}
};
@@ -2275,10 +2261,7 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
if recipients.is_empty() {
// may happen eg. for groups with only SELF and bcc_self disabled
info!(
context,
"message {} has no recipient, skipping smtp-send", msg_id
);
info!("Message {msg_id} has no recipient, skipping smtp-send.");
msg_id.set_delivered(context).await?;
return Ok(None);
}
@@ -2312,27 +2295,27 @@ async fn create_send_msg_job(context: &Context, msg_id: MsgId) -> Result<Option<
if 0 != rendered_msg.last_added_location_id {
if let Err(err) = location::set_kml_sent_timestamp(context, msg.chat_id, time()).await {
error!(context, "Failed to set kml sent_timestamp: {:#}", err);
error!("Failed to set kml sent_timestamp: {err:#}.");
}
if !msg.hidden {
if let Err(err) =
location::set_msg_location_id(context, msg.id, rendered_msg.last_added_location_id)
.await
{
error!(context, "Failed to set msg_location_id: {:#}", err);
error!("Failed to set msg_location_id: {err:#}.");
}
}
}
if let Some(sync_ids) = rendered_msg.sync_ids_to_delete {
if let Err(err) = context.delete_sync_ids(sync_ids).await {
error!(context, "Failed to delete sync ids: {:#}", err);
error!("Failed to delete sync ids: {err:#}.");
}
}
if attach_selfavatar {
if let Err(err) = msg.chat_id.set_selfavatar_timestamp(context, time()).await {
error!(context, "Failed to set selfavatar timestamp: {:#}", err);
error!("Failed to set selfavatar timestamp: {err:#}.");
}
}
@@ -2686,9 +2669,7 @@ pub(crate) async fn mark_old_messages_as_noticed(
if !changed_chats.is_empty() {
info!(
context,
"Marking chats as noticed because there are newer outgoing messages: {:?}",
changed_chats
"Marking chats as noticed because there are newer outgoing messages: {changed_chats:?}."
);
}
@@ -3007,10 +2988,7 @@ pub(crate) async fn add_contact_to_chat_ex(
if context.is_self_addr(contact.get_addr()).await? {
// ourself is added using ContactId::SELF, do not add this address explicitly.
// if SELF is not in the group, members cannot be added at all.
warn!(
context,
"invalid attempt to add self e-mail address to group"
);
warn!("Invalid attempt to add self e-mail address to group.");
return Ok(false);
}
@@ -3023,10 +3001,7 @@ pub(crate) async fn add_contact_to_chat_ex(
if chat.is_protected()
&& contact.is_verified(context).await? != VerifiedStatus::BidirectVerified
{
error!(
context,
"Only bidirectional verified contacts can be added to protected chats."
);
error!("Only bidirectional verified contacts can be added to protected chats.");
return Ok(false);
}
if is_contact_in_chat(context, chat_id, contact_id).await? {
@@ -3433,7 +3408,10 @@ pub async fn forward_msgs(context: &Context, msg_ids: &[MsgId], chat_id: ChatId)
.await?;
curr_timestamp += 1;
if create_send_msg_job(context, new_msg_id).await?.is_some() {
context.interrupt_smtp(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_smtp(InterruptInfo::new(false))
.await;
}
}
created_chats.push(chat_id);
@@ -3488,7 +3466,10 @@ pub async fn resend_msgs(context: &Context, msg_ids: &[MsgId]) -> Result<()> {
msg_id: msg.id,
});
if create_send_msg_job(context, msg.id).await?.is_some() {
context.interrupt_smtp(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_smtp(InterruptInfo::new(false))
.await;
}
}
}
@@ -3550,7 +3531,7 @@ pub async fn add_device_msg_with_importance(
if let Some(label) = label {
if was_device_msg_ever_added(context, label).await? {
info!(context, "device-message {} already added", label);
info!("Device-message {label} already added.");
return Ok(msg_id);
}
}

View File

@@ -1,6 +1,7 @@
//! # Chat list module.
use anyhow::{ensure, Context as _, Result};
use tracing::warn;
use crate::chat::{update_special_chat_names, Chat, ChatId, ChatVisibility};
use crate::constants::{
@@ -176,7 +177,7 @@ impl Chatlist {
// allow searching over special names that may change at any time
// when the ui calls set_stock_translation()
if let Err(err) = update_special_chat_names(context).await {
warn!(context, "cannot update special chat names: {:?}", err)
warn!("Cannot update special chat names: {err:#}.")
}
let str_like_cmd = format!("%{query}%");

View File

@@ -443,7 +443,7 @@ impl Context {
Config::DeleteDeviceAfter => {
let ret = self.sql.set_raw_config(key.as_ref(), value).await;
// Interrupt ephemeral loop to delete old messages immediately.
self.interrupt_ephemeral_task().await;
self.scheduler.interrupt_ephemeral_task().await;
ret?
}
Config::Displayname => {

View File

@@ -13,6 +13,7 @@ use futures_lite::FutureExt as _;
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
use server_params::{expand_param_vector, ServerParams};
use tokio::task;
use tracing::{info, warn};
use crate::config::Config;
use crate::contact::addr_cmp;
@@ -59,7 +60,7 @@ impl Context {
/// Configures this account with the currently set parameters.
pub async fn configure(&self) -> Result<()> {
ensure!(
self.scheduler.read().await.is_none(),
!self.scheduler.is_running().await,
"cannot configure, already running"
);
ensure!(
@@ -100,7 +101,7 @@ impl Context {
}
async fn inner_configure(&self) -> Result<()> {
info!(self, "Configure ...");
info!("Configure ...");
let mut param = LoginParam::load_candidate_params(self).await?;
let old_addr = self.get_config(Config::ConfiguredAddr).await?;
@@ -125,13 +126,10 @@ async fn on_configure_completed(
if let Some(config_defaults) = &provider.config_defaults {
for def in config_defaults.iter() {
if !context.config_exists(def.key).await? {
info!(context, "apply config_defaults {}={}", def.key, def.value);
info!("apply config_defaults {}={}", def.key, def.value);
context.set_config(def.key, Some(def.value)).await?;
} else {
info!(
context,
"skip already set config_defaults {}={}", def.key, def.value
);
info!("skip already set config_defaults {}={}", def.key, def.value);
}
}
}
@@ -143,7 +141,7 @@ async fn on_configure_completed(
.await
.is_err()
{
warn!(context, "cannot add after_login_hint as core-provider-info");
warn!("cannot add after_login_hint as core-provider-info");
}
}
}
@@ -186,7 +184,7 @@ async fn configure(ctx: &Context, param: &mut LoginParam) -> Result<()> {
.await?
.and_then(|e| e.parse().ok())
{
info!(ctx, "Authorized address is {}", oauth2_addr);
info!("Authorized address is {oauth2_addr}");
param.addr = oauth2_addr;
ctx.sql
.set_raw_config("addr", Some(param.addr.as_str()))
@@ -215,22 +213,17 @@ async fn configure(ctx: &Context, param: &mut LoginParam) -> Result<()> {
{
// no advanced parameters entered by the user: query provider-database or do Autoconfig
info!(
ctx,
"checking internal provider-info for offline autoconfig"
);
info!("checking internal provider-info for offline autoconfig");
if let Some(provider) =
provider::get_provider_info(ctx, &param_domain, socks5_enabled).await
{
if let Some(provider) = provider::get_provider_info(&param_domain, socks5_enabled).await {
param.provider = Some(provider);
match provider.status {
provider::Status::Ok | provider::Status::Preparation => {
if provider.server.is_empty() {
info!(ctx, "offline autoconfig found, but no servers defined");
info!("offline autoconfig found, but no servers defined");
param_autoconfig = None;
} else {
info!(ctx, "offline autoconfig found");
info!("offline autoconfig found");
let servers = provider
.server
.iter()
@@ -257,17 +250,17 @@ async fn configure(ctx: &Context, param: &mut LoginParam) -> Result<()> {
}
}
provider::Status::Broken => {
info!(ctx, "offline autoconfig found, provider is broken");
info!("offline autoconfig found, provider is broken");
param_autoconfig = None;
}
}
} else {
// Try receiving autoconfig
info!(ctx, "no offline autoconfig found");
info!("no offline autoconfig found");
param_autoconfig = if socks5_enabled {
// Currently we can't do http requests through socks5, to not leak
// the ip, just don't do online autoconfig
info!(ctx, "socks5 enabled, skipping autoconfig");
info!("socks5 enabled, skipping autoconfig");
None
} else {
get_autoconfig(ctx, param, &param_domain, &param_addr_urlencoded).await
@@ -465,11 +458,13 @@ async fn configure(ctx: &Context, param: &mut LoginParam) -> Result<()> {
progress!(ctx, 920);
e2ee::ensure_secret_key_exists(ctx).await?;
info!(ctx, "key generation completed");
info!("key generation completed");
ctx.set_config_bool(Config::FetchedExistingMsgs, false)
.await?;
ctx.interrupt_inbox(InterruptInfo::new(false)).await;
ctx.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
progress!(ctx, 940);
update_device_chats_handle.await??;
@@ -576,13 +571,13 @@ async fn try_imap_one_param(
"None".to_string()
}
);
info!(context, "Trying: {}", inf);
info!("Trying: {}", inf);
let (_s, r) = async_channel::bounded(1);
let mut imap = match Imap::new(param, socks5_config.clone(), addr, provider_strict_tls, r) {
Err(err) => {
info!(context, "failure: {:#}", err);
info!("failure: {:#}", err);
return Err(ConfigurationError {
config: inf,
msg: format!("{err:#}"),
@@ -593,14 +588,14 @@ async fn try_imap_one_param(
match imap.connect(context).await {
Err(err) => {
info!(context, "failure: {:#}", err);
info!("failure: {:#}", err);
Err(ConfigurationError {
config: inf,
msg: format!("{err:#}"),
})
}
Ok(()) => {
info!(context, "success: {}", inf);
info!("success: {}", inf);
Ok(imap)
}
}
@@ -628,19 +623,19 @@ async fn try_smtp_one_param(
"None".to_string()
}
);
info!(context, "Trying: {}", inf);
info!("Trying: {inf}");
if let Err(err) = smtp
.connect(context, param, socks5_config, addr, provider_strict_tls)
.await
{
info!(context, "failure: {}", err);
info!("failure: {err:#}");
Err(ConfigurationError {
config: inf,
msg: format!("{err:#}"),
})
} else {
info!(context, "success: {}", inf);
info!("success: {inf}");
smtp.disconnect().await;
Ok(())
}

View File

@@ -1,10 +1,10 @@
//! # Thunderbird's Autoconfiguration implementation
//!
//! Documentation: <https://developer.mozilla.org/en-US/docs/Mozilla/Thunderbird/Autoconfiguration>
use quick_xml::events::{BytesStart, Event};
use std::io::BufRead;
use std::str::FromStr;
use quick_xml::events::{BytesStart, Event};
use tracing::warn;
use super::read_url::read_url;
use super::{Error, ServerParams};
@@ -263,10 +263,7 @@ pub(crate) async fn moz_autoconfigure(
let res = parse_serverparams(&param_in.addr, &xml_raw);
if let Err(err) = &res {
warn!(
context,
"Failed to parse Thunderbird autoconfiguration XML: {}", err
);
warn!("Failed to parse Thunderbird autoconfiguration XML: {err:#}");
}
res
}

View File

@@ -6,6 +6,7 @@
use std::io::BufRead;
use quick_xml::events::Event;
use tracing::warn;
use super::read_url::read_url;
use super::{Error, ServerParams};
@@ -202,7 +203,7 @@ pub(crate) async fn outlk_autodiscover(
let xml_raw = read_url(context, &url).await?;
let res = parse_xml(&xml_raw);
if let Err(err) = &res {
warn!(context, "{}", err);
warn!("{err:#}");
}
match res? {
ParsingResult::RedirectUrl(redirect_url) => url = redirect_url,

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, format_err};
use tracing::info;
use crate::context::Context;
use crate::socks::Socks5Config;
@@ -6,12 +7,12 @@ use crate::socks::Socks5Config;
pub async fn read_url(context: &Context, url: &str) -> anyhow::Result<String> {
match read_url_inner(context, url).await {
Ok(s) => {
info!(context, "Successfully read url {}", url);
info!("Successfully read url {url}");
Ok(s)
}
Err(e) => {
info!(context, "Can't read URL {}: {:#}", url, e);
Err(format_err!("Can't read URL {}: {:#}", url, e))
info!("Can't read URL {url}: {e:#}");
Err(format_err!("Can't read URL {url}: {e:#}"))
}
}
}
@@ -32,7 +33,7 @@ pub async fn read_url_inner(context: &Context, url: &str) -> anyhow::Result<Stri
.last()
.ok_or_else(|| anyhow!("Redirection doesn't have a target location"))?
.to_str()?;
info!(context, "Following redirect to {}", header);
info!("Following redirect to {header}");
url = header.to_string();
continue;
}

View File

@@ -17,12 +17,16 @@ use rusqlite::OptionalExtension;
use serde::{Deserialize, Serialize};
use tokio::task;
use tokio::time::{timeout, Duration};
use tracing::instrument::WithSubscriber;
use tracing::Instrument;
use tracing::{info, warn};
use crate::aheader::EncryptPreference;
use crate::chat::ChatId;
use crate::color::str_to_color;
use crate::config::Config;
use crate::constants::{Blocked, Chattype, DC_GCL_ADD_SELF, DC_GCL_VERIFIED_ONLY};
use crate::context::future::ContextIdFutureExt;
use crate::context::Context;
use crate::events::EventType;
use crate::key::{DcKey, SignedPublicKey};
@@ -546,7 +550,7 @@ impl Contact {
// Filter out use-once addresses (like reply+AEJDGPOECLAP...@reply.github.com):
|| (addr.len() > 50 && addr.contains('+'))
{
info!(context, "hiding contact {}", addr);
info!("hiding contact {addr}");
origin = Origin::Hidden;
// For these kind of email addresses, sender and address often don't belong together
// (like hocuri <notifications@github.com>). In this example, hocuri shouldn't
@@ -699,7 +703,7 @@ impl Contact {
sth_modified = Modifier::Created;
row_id = u32::try_from(transaction.last_insert_rowid())?;
info!(context, "added contact id={} addr={}", row_id, &addr);
info!("added contact id={} addr={}", row_id, &addr);
}
Ok(row_id)
}).await?;
@@ -739,15 +743,12 @@ impl Contact {
}
}
Err(err) => {
warn!(
context,
"Failed to add address {} from address book: {}", addr, err
);
warn!("Failed to add address {addr} from address book: {err:#}");
}
}
}
Err(err) => {
warn!(context, "{:#}.", err);
warn!("{err:#}.");
}
}
}
@@ -1055,7 +1056,7 @@ impl Contact {
}
/// Updates `param` column in the database.
pub async fn update_param(&self, context: &Context) -> Result<()> {
pub(crate) async fn update_param(&self, context: &Context) -> Result<()> {
context
.sql
.execute(
@@ -1067,7 +1068,7 @@ impl Contact {
}
/// Updates `status` column in the database.
pub async fn update_status(&self, context: &Context) -> Result<()> {
pub(crate) async fn update_status(&self, context: &Context) -> Result<()> {
context
.sql
.execute(
@@ -1388,7 +1389,7 @@ pub(crate) async fn set_profile_image(
.set_config(Config::Selfavatar, Some(profile_image))
.await?;
} else {
info!(context, "Do not use unencrypted selfavatar.");
info!("Do not use unencrypted selfavatar.");
}
} else {
contact.param.set(Param::ProfileImage, profile_image);
@@ -1400,7 +1401,7 @@ pub(crate) async fn set_profile_image(
if was_encrypted {
context.set_config(Config::Selfavatar, None).await?;
} else {
info!(context, "Do not use unencrypted selfavatar deletion.");
info!("Do not use unencrypted selfavatar deletion.");
}
} else {
contact.param.remove(Param::ProfileImage);
@@ -1466,7 +1467,10 @@ pub(crate) async fn update_last_seen(
> 0
&& timestamp > time() - SEEN_RECENTLY_SECONDS
{
context.interrupt_recently_seen(contact_id, timestamp).await;
context
.scheduler
.interrupt_recently_seen(contact_id, timestamp)
.await;
}
Ok(())
}
@@ -1552,7 +1556,12 @@ impl RecentlySeenLoop {
pub(crate) fn new(context: Context) -> Self {
let (interrupt_send, interrupt_recv) = channel::bounded(1);
let handle = task::spawn(Self::run(context, interrupt_recv));
let handle = task::spawn(
Self::run(context, interrupt_recv)
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
);
Self {
handle,
interrupt_send,
@@ -1604,7 +1613,6 @@ impl RecentlySeenLoop {
if let Ok(duration) = until.duration_since(now) {
info!(
context,
"Recently seen loop waiting for {} or interrupt",
duration_to_str(duration)
);
@@ -1618,10 +1626,7 @@ impl RecentlySeenLoop {
}
}
Ok(Err(err)) => {
warn!(
context,
"Error receiving an interruption in recently seen loop: {}", err
);
warn!("Error receiving an interruption in recently seen loop: {err:#}.");
// Maybe the sender side is closed.
// Terminate the loop to avoid looping indefinitely.
return;
@@ -1635,10 +1640,7 @@ impl RecentlySeenLoop {
}
}
} else {
info!(
context,
"Recently seen loop is not waiting, event is already due."
);
info!("Recently seen loop is not waiting, event is already due.");
// Event is already in the past.
if let Some(contact_id) = contact_id {

View File

@@ -13,23 +13,26 @@ use async_channel::{self as channel, Receiver, Sender};
use ratelimit::Ratelimit;
use tokio::sync::{Mutex, RwLock};
use tokio::task;
use tracing::{info, warn};
use crate::chat::{get_chat_cnt, ChatId};
use crate::config::Config;
use crate::constants::DC_VERSION_STR;
use crate::contact::Contact;
use crate::debug_logging::DebugEventLogData;
use crate::events::{Event, EventEmitter, EventType, Events};
use crate::events::{Event, EventEmitter, EventLayer, EventType, Events};
use crate::key::{DcKey, SignedPublicKey};
use crate::login_param::LoginParam;
use crate::message::{self, MessageState, MsgId};
use crate::quota::QuotaInfo;
use crate::scheduler::Scheduler;
use crate::scheduler::SchedulerState;
use crate::sql::Sql;
use crate::stock_str::StockStrings;
use crate::timesmearing::SmearedTimestamp;
use crate::tools::{duration_to_str, time};
pub mod future;
/// Builder for the [`Context`].
///
/// Many arguments to the [`Context`] are kind of optional and only needed to handle
@@ -191,6 +194,10 @@ pub struct InnerContext {
pub(crate) blobdir: PathBuf,
pub(crate) sql: Sql,
pub(crate) smeared_timestamp: SmearedTimestamp,
/// The global "ongoing" process state.
///
/// This is a global mutex-like state for operations which should be modal in the
/// clients.
running_state: RwLock<RunningState>,
/// Mutex to avoid generating the key for the user more than once.
pub(crate) generating_key_mutex: Mutex<()>,
@@ -201,7 +208,7 @@ pub struct InnerContext {
pub(crate) translated_stockstrings: StockStrings,
pub(crate) events: Events,
pub(crate) scheduler: RwLock<Option<Scheduler>>,
pub(crate) scheduler: SchedulerState,
pub(crate) ratelimit: RwLock<Ratelimit>,
/// Recently loaded quota information, if any.
@@ -229,12 +236,6 @@ pub struct InnerContext {
creation_time: SystemTime,
/// The text of the last error logged and emitted as an event.
/// If the ui wants to display an error after a failure,
/// `last_error` should be used to avoid races with the event thread.
pub(crate) last_error: std::sync::RwLock<String>,
/// If debug logging is enabled, this contains all necessary information
pub(crate) debug_logging: RwLock<Option<DebugLogging>>,
}
@@ -292,7 +293,7 @@ impl Context {
events: Events,
stock_strings: StockStrings,
) -> Result<Context> {
let context = Self::new_closed(dbfile, id, events, stock_strings).await?;
let context = Self::new_closed(dbfile, id, events.clone(), stock_strings).await?;
// Open the database if is not encrypted.
if context.check_passphrase("".to_string()).await? {
@@ -301,6 +302,11 @@ impl Context {
Ok(context)
}
/// Returns `tracing` subscriber layer.
pub fn to_layer(&self) -> EventLayer {
self.events.to_layer()
}
/// Creates new context without opening the database.
pub async fn new_closed(
dbfile: &Path,
@@ -370,7 +376,7 @@ impl Context {
wrong_pw_warning_mutex: Mutex::new(()),
translated_stockstrings: stockstrings,
events,
scheduler: RwLock::new(None),
scheduler: SchedulerState::new(),
ratelimit: RwLock::new(Ratelimit::new(Duration::new(60, 0), 6.0)), // Allow to send 6 messages immediately, no more than once every 10 seconds.
quota: RwLock::new(None),
quota_update_request: AtomicBool::new(false),
@@ -378,7 +384,6 @@ impl Context {
server_id: RwLock::new(None),
creation_time: std::time::SystemTime::now(),
last_full_folder_scan: Mutex::new(None),
last_error: std::sync::RwLock::new("".to_string()),
debug_logging: RwLock::new(None),
};
@@ -392,45 +397,26 @@ impl Context {
/// Starts the IO scheduler.
pub async fn start_io(&self) {
if let Ok(false) = self.is_configured().await {
warn!(self, "can not start io on a context that is not configured");
warn!("can not start io on a context that is not configured");
return;
}
info!(self, "starting IO");
let mut lock = self.inner.scheduler.write().await;
if lock.is_none() {
match Scheduler::start(self.clone()).await {
Err(err) => error!(self, "Failed to start IO: {:#}", err),
Ok(scheduler) => *lock = Some(scheduler),
}
}
self.scheduler.start(self.clone()).await;
}
/// Stops the IO scheduler.
pub async fn stop_io(&self) {
// Sending an event wakes up event pollers (get_next_event)
// so the caller of stop_io() can arrange for proper termination.
// For this, the caller needs to instruct the event poller
// to terminate on receiving the next event and then call stop_io()
// which will emit the below event(s)
info!(self, "stopping IO");
if let Some(debug_logging) = self.debug_logging.read().await.as_ref() {
debug_logging.loop_handle.abort();
}
if let Some(scheduler) = self.inner.scheduler.write().await.take() {
scheduler.stop(self).await;
}
self.scheduler.stop(self).await;
}
/// Restarts the IO scheduler if it was running before
/// when it is not running this is an no-op
pub async fn restart_io_if_running(&self) {
info!(self, "restarting IO");
let is_running = { self.inner.scheduler.read().await.is_some() };
if is_running {
self.stop_io().await;
self.start_io().await;
}
self.scheduler.restart(self).await;
}
/// Indicate that the network likely has come back.
pub async fn maybe_network(&self) {
self.scheduler.maybe_network().await;
}
/// Returns a reference to the underlying SQL instance.
@@ -521,6 +507,13 @@ impl Context {
// Ongoing process allocation/free/check
/// Tries to acquire the global UI "ongoing" mutex.
///
/// This is for modal operations during which no other user actions are allowed. Only
/// one such operation is allowed at any given time.
///
/// The return value is a cancel token, which will release the ongoing mutex when
/// dropped.
pub(crate) async fn alloc_ongoing(&self) -> Result<Receiver<()>> {
let mut s = self.running_state.write().await;
ensure!(
@@ -547,13 +540,13 @@ impl Context {
match &*s {
RunningState::Running { cancel_sender } => {
if let Err(err) = cancel_sender.send(()).await {
warn!(self, "could not cancel ongoing: {:#}", err);
warn!("could not cancel ongoing: {:#}", err);
}
info!(self, "Signaling the ongoing process to stop ASAP.",);
info!("Signaling the ongoing process to stop ASAP.",);
*s = RunningState::ShallStop;
}
RunningState::ShallStop | RunningState::Stopped => {
info!(self, "No ongoing process to stop.",);
info!("No ongoing process to stop.",);
}
}
}

83
src/context/future.rs Normal file
View File

@@ -0,0 +1,83 @@
//! Futures extensions to track current context ID.
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::thread_local;
thread_local! {
static THREAD_CONTEXT_ID: RefCell<u32> = RefCell::new(0);
}
pub(crate) struct ContextIdGuard {
previous: u32,
}
pub(crate) fn current_context_id() -> u32 {
THREAD_CONTEXT_ID.with(|context_id| *context_id.borrow())
}
impl ContextIdGuard {
fn new(context_id: u32) -> Self {
let previous = THREAD_CONTEXT_ID.with(|prev_context_id| {
let ret = *prev_context_id.borrow();
*prev_context_id.borrow_mut() = context_id;
ret
});
Self { previous }
}
}
impl Drop for ContextIdGuard {
fn drop(&mut self) {
THREAD_CONTEXT_ID.with(|context_id| {
*context_id.borrow_mut() = self.previous;
})
}
}
pin_project! {
/// A future with attached context ID.
#[derive(Debug, Clone)]
pub struct ContextIdFuture<F> {
context_id: u32,
#[pin]
future: F,
}
}
impl<F> ContextIdFuture<F> {
/// Wraps a future.
pub fn new(context_id: u32, future: F) -> Self {
Self { context_id, future }
}
}
impl<F: Future> Future for ContextIdFuture<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let context_id = self.context_id;
let this = self.project();
let _guard = ContextIdGuard::new(context_id);
this.future.poll(cx)
}
}
/// Future extension to bind context ID.
pub trait ContextIdFutureExt: Sized {
/// Binds context ID to the future.
fn bind_context_id(self, context_id: u32) -> ContextIdFuture<Self> {
ContextIdFuture::new(context_id, self)
}
/// Binds current context ID to the future.
fn bind_current_context_id(self) -> ContextIdFuture<Self> {
self.bind_context_id(current_context_id())
}
}
impl<F> ContextIdFutureExt for F where F: Future {}

View File

@@ -12,6 +12,7 @@ use async_channel::{self as channel, Receiver};
use serde_json::json;
use std::path::PathBuf;
use tokio::task;
use tracing::info;
/// Store all information needed to log an event to a webxdc.
pub struct DebugEventLogData {
@@ -132,7 +133,7 @@ pub(crate) async fn set_debug_logging_xdc(ctx: &Context, id: Option<MsgId>) -> a
});
}
}
info!(ctx, "replacing logging webxdc");
info!("replacing logging webxdc");
}
// Delete current debug logging
None => {
@@ -140,7 +141,7 @@ pub(crate) async fn set_debug_logging_xdc(ctx: &Context, id: Option<MsgId>) -> a
.set_raw_config(Config::DebugLogging.as_ref(), None)
.await?;
*ctx.debug_logging.write().await = None;
info!(ctx, "removing logging webxdc");
info!("removing logging webxdc");
}
}
Ok(())

View File

@@ -5,6 +5,7 @@ use std::str::FromStr;
use anyhow::Result;
use mailparse::ParsedMail;
use tracing::{info, warn};
use crate::aheader::Aheader;
use crate::authres::handle_authres;
@@ -24,7 +25,6 @@ use crate::pgp;
///
/// If the message is wrongly signed, HashSet will be empty.
pub fn try_decrypt(
context: &Context,
mail: &ParsedMail<'_>,
private_keyring: &Keyring<SignedSecretKey>,
public_keyring_for_validate: &Keyring<SignedPublicKey>,
@@ -36,7 +36,7 @@ pub fn try_decrypt(
None => return Ok(None),
Some(res) => res,
};
info!(context, "Detected Autocrypt-mime message");
info!("Detected Autocrypt-mime message");
decrypt_part(
encrypted_data_part,
@@ -54,7 +54,6 @@ pub(crate) async fn prepare_decryption(
if mail.headers.get_header(HeaderDef::ListPost).is_some() {
if mail.headers.get_header(HeaderDef::Autocrypt).is_some() {
info!(
context,
"Ignoring autocrypt header since this is a mailing list message. \
NOTE: For privacy reasons, the mailing list software should remove Autocrypt headers."
);
@@ -78,13 +77,13 @@ pub(crate) async fn prepare_decryption(
Ok(header) if addr_cmp(&header.addr, from) => Some(header),
Ok(header) => {
warn!(
context,
"Autocrypt header address {:?} is not {:?}.", header.addr, from
"Autocrypt header address {:?} is not {:?}.",
header.addr, from
);
None
}
Err(err) => {
warn!(context, "Failed to parse Autocrypt header: {:#}.", err);
warn!("Failed to parse Autocrypt header: {:#}.", err);
None
}
}
@@ -319,8 +318,8 @@ pub(crate) async fn get_autocrypt_peerstate(
peerstate.save_to_db(&context.sql).await?;
} else {
info!(
context,
"Refusing to update existing peerstate of {}", &peerstate.addr
"Refusing to update existing peerstate of {}",
&peerstate.addr
);
}
}

View File

@@ -6,6 +6,7 @@ use std::collections::BTreeMap;
use anyhow::{anyhow, Result};
use deltachat_derive::{FromSql, ToSql};
use serde::{Deserialize, Serialize};
use tracing::{info, warn};
use crate::config::Config;
use crate::context::Context;
@@ -124,7 +125,7 @@ impl Job {
/// Called in response to `Action::DownloadMsg`.
pub(crate) async fn download_msg(&self, context: &Context, imap: &mut Imap) -> Status {
if let Err(err) = imap.prepare(context).await {
warn!(context, "download: could not connect: {:#}", err);
warn!("download: could not connect: {:#}", err);
return Status::RetryNow;
}
@@ -195,7 +196,7 @@ impl Imap {
}
// we are connected, and the folder is selected
info!(context, "Downloading message {}/{} fully...", folder, uid);
info!("Downloading message {}/{} fully...", folder, uid);
let mut uid_message_ids: BTreeMap<u32, String> = BTreeMap::new();
uid_message_ids.insert(uid, rfc724_mid);
@@ -240,7 +241,7 @@ impl MimeMessage {
text += format!(" [{until}]").as_str();
};
info!(context, "Partial download: {}", text);
info!("Partial download: {}", text);
self.parts.push(Part {
typ: Viewtype::Text,

View File

@@ -2,6 +2,7 @@
use anyhow::{format_err, Context as _, Result};
use num_traits::FromPrimitive;
use tracing::info;
use crate::aheader::{Aheader, EncryptPreference};
use crate::config::Config;
@@ -51,7 +52,6 @@ impl EncryptHelper {
/// Returns an error if `e2ee_guaranteed` is true, but one or more keys are missing.
pub fn should_encrypt(
&self,
context: &Context,
e2ee_guaranteed: bool,
peerstates: &[(Option<Peerstate>, &str)],
) -> Result<bool> {
@@ -63,10 +63,7 @@ impl EncryptHelper {
for (peerstate, addr) in peerstates {
match peerstate {
Some(peerstate) => {
info!(
context,
"peerstate for {:?} is {}", addr, peerstate.prefer_encrypt
);
info!("peerstate for {:?} is {}", addr, peerstate.prefer_encrypt);
match peerstate.prefer_encrypt {
EncryptPreference::NoPreference => {}
EncryptPreference::Mutual => prefer_encrypt_count += 1,
@@ -82,7 +79,7 @@ impl EncryptHelper {
if e2ee_guaranteed {
return Err(format_err!("{}", msg));
} else {
info!(context, "{}", msg);
info!("{}", msg);
return Ok(false);
}
}
@@ -322,22 +319,22 @@ Sent with my Delta Chat Messenger: https://delta.chat";
// test with EncryptPreference::NoPreference:
// if e2ee_eguaranteed is unset, there is no encryption as not more than half of peers want encryption
let ps = new_peerstates(EncryptPreference::NoPreference);
assert!(encrypt_helper.should_encrypt(&t, true, &ps).unwrap());
assert!(!encrypt_helper.should_encrypt(&t, false, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(true, &ps).unwrap());
assert!(!encrypt_helper.should_encrypt(false, &ps).unwrap());
// test with EncryptPreference::Reset
let ps = new_peerstates(EncryptPreference::Reset);
assert!(encrypt_helper.should_encrypt(&t, true, &ps).unwrap());
assert!(!encrypt_helper.should_encrypt(&t, false, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(true, &ps).unwrap());
assert!(!encrypt_helper.should_encrypt(false, &ps).unwrap());
// test with EncryptPreference::Mutual (self is also Mutual)
let ps = new_peerstates(EncryptPreference::Mutual);
assert!(encrypt_helper.should_encrypt(&t, true, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(&t, false, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(true, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(false, &ps).unwrap());
// test with missing peerstate
let ps = vec![(None, "bob@foo.bar")];
assert!(encrypt_helper.should_encrypt(&t, true, &ps).is_err());
assert!(!encrypt_helper.should_encrypt(&t, false, &ps).unwrap());
assert!(encrypt_helper.should_encrypt(true, &ps).is_err());
assert!(!encrypt_helper.should_encrypt(false, &ps).unwrap());
}
}

View File

@@ -72,6 +72,7 @@ use anyhow::{ensure, Result};
use async_channel::Receiver;
use serde::{Deserialize, Serialize};
use tokio::time::timeout;
use tracing::{error, info, warn};
use crate::chat::{send_msg, ChatId};
use crate::constants::{DC_CHAT_ID_LAST_SPECIAL, DC_CHAT_ID_TRASH};
@@ -225,8 +226,8 @@ impl ChatId {
msg.param.set_cmd(SystemMessage::EphemeralTimerChanged);
if let Err(err) = send_msg(context, self, &mut msg).await {
error!(
context,
"Failed to send a message about ephemeral message timer change: {:?}", err
"Failed to send a message about ephemeral message timer change: {:?}",
err
);
}
}
@@ -317,7 +318,7 @@ impl MsgId {
paramsv![ephemeral_timestamp, ephemeral_timestamp, self],
)
.await?;
context.interrupt_ephemeral_task().await;
context.scheduler.interrupt_ephemeral_task().await;
}
Ok(())
}
@@ -345,7 +346,7 @@ pub(crate) async fn start_ephemeral_timers_msgids(
)
.await?;
if count > 0 {
context.interrupt_ephemeral_task().await;
context.scheduler.interrupt_ephemeral_task().await;
}
Ok(())
}
@@ -433,7 +434,7 @@ pub(crate) async fn delete_expired_messages(context: &Context, now: i64) -> Resu
let rows = select_expired_messages(context, now).await?;
if !rows.is_empty() {
info!(context, "Attempting to delete {} messages.", rows.len());
info!("Attempting to delete {} messages.", rows.len());
let (msgs_changed, webxdc_deleted) = context
.sql
@@ -523,7 +524,7 @@ async fn next_expiration_timestamp(context: &Context) -> Option<i64> {
.await
{
Err(err) => {
warn!(context, "Can't calculate next ephemeral timeout: {}", err);
warn!("Can't calculate next ephemeral timeout: {}", err);
None
}
Ok(ephemeral_timestamp) => ephemeral_timestamp,
@@ -533,8 +534,8 @@ async fn next_expiration_timestamp(context: &Context) -> Option<i64> {
match next_delete_device_after_timestamp(context).await {
Err(err) => {
warn!(
context,
"Can't calculate timestamp of the next message expiration: {}", err
"Can't calculate timestamp of the next message expiration: {}",
err
);
None
}
@@ -563,7 +564,6 @@ pub(crate) async fn ephemeral_loop(context: &Context, interrupt_receiver: Receiv
if let Ok(duration) = until.duration_since(now) {
info!(
context,
"Ephemeral loop waiting for deletion in {} or interrupt",
duration_to_str(duration)
);

View File

@@ -1,10 +1,17 @@
#![allow(missing_docs)]
//! # Events specification.
use std::fmt::Write;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use async_channel::{self as channel, Receiver, Sender, TrySendError};
use serde::Serialize;
use tracing::Level;
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::Layer;
use crate::chat::ChatId;
use crate::contact::ContactId;
use crate::ephemeral::Timer as EphemeralTimer;
@@ -16,6 +23,11 @@ use crate::webxdc::StatusUpdateSerial;
pub struct Events {
receiver: Receiver<Event>,
sender: Sender<Event>,
/// The text of the last error logged and emitted as an event.
/// If the ui wants to display an error after a failure,
/// `last_error` should be used to avoid races with the event thread.
pub(crate) last_error: Arc<RwLock<String>>,
}
impl Default for Events {
@@ -29,11 +41,26 @@ impl Events {
pub fn new() -> Self {
let (sender, receiver) = channel::bounded(1_000);
Self { receiver, sender }
Self {
receiver,
sender,
last_error: Arc::new(RwLock::new("".to_string())),
}
}
/// Emits an event.
/// Set last error string.
/// Implemented as blocking as used from macros in different, not always async blocks.
pub fn set_last_error(&self, error: &str) {
let error = error.to_string();
let mut last_error = self.last_error.write().unwrap();
*last_error = error;
}
/// Emits an event into event channel.
pub fn emit(&self, event: Event) {
if let EventType::Error(formatted) = &event.typ {
self.set_last_error(formatted);
}
match self.sender.try_send(event) {
Ok(()) => {}
Err(TrySendError::Full(event)) => {
@@ -53,6 +80,89 @@ impl Events {
pub fn get_emitter(&self) -> EventEmitter {
EventEmitter(self.receiver.clone())
}
/// Returns `tracing` subscriber layer.
pub fn to_layer(&self) -> EventLayer {
EventLayer::new(self.sender.clone(), self.last_error.clone())
}
}
#[derive(Debug)]
struct MessageStorage<'a>(&'a mut String);
impl tracing::field::Visit for MessageStorage<'_> {
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
if field.name() == "message" {
write!(self.0, "{value:?}").ok();
}
}
}
/// Tracing subscriber layer converting logs into Delta Chat events.
#[derive(Debug)]
pub struct EventLayer {
/// Event channel for event submission.
sender: Sender<Event>,
last_error: Arc<RwLock<String>>,
}
impl EventLayer {
pub(crate) fn new(sender: Sender<Event>, last_error: Arc<RwLock<String>>) -> Self {
Self { sender, last_error }
}
fn set_last_error(&self, error: &str) {
let error = error.to_string();
let mut last_error = self.last_error.write().unwrap();
*last_error = error;
}
}
impl<S> Layer<S> for EventLayer
where
S: tracing::Subscriber + for<'a> LookupSpan<'a>,
{
fn on_event(
&self,
event: &tracing::Event<'_>,
_ctx: tracing_subscriber::layer::Context<'_, S>,
) {
let context_id = crate::context::future::current_context_id();
let &level = event.metadata().level();
let mut message = "".to_string();
let mut visitor = MessageStorage(&mut message);
event.record(&mut visitor);
match level {
Level::ERROR => {
self.set_last_error(&message);
self.sender
.try_send(Event {
id: context_id,
typ: EventType::Error(message),
})
.ok();
}
Level::WARN => {
self.sender
.try_send(Event {
id: context_id,
typ: EventType::Warning(message),
})
.ok();
}
Level::INFO => {
self.sender
.try_send(Event {
id: context_id,
typ: EventType::Info(message),
})
.ok();
}
Level::TRACE | Level::DEBUG => {}
}
}
}
/// A receiver of events from a [`Context`].
@@ -133,6 +243,9 @@ pub enum EventType {
/// Emitted when an IMAP message has been moved
ImapMessageMoved(String),
/// Emitted before going into IDLE on the Inbox folder.
ImapInboxIdle,
/// Emitted when a new file in the $BLOBDIR was created
NewBlobFile(String),

View File

@@ -16,6 +16,7 @@ use futures::future::FutureExt;
use lettre_email::mime::{self, Mime};
use lettre_email::PartBuilder;
use mailparse::ParsedContentType;
use tracing::warn;
use crate::headerdef::{HeaderDef, HeaderDefMap};
use crate::message::{Message, MsgId};
@@ -87,7 +88,7 @@ impl HtmlMsgParser {
/// Function takes a raw mime-message string,
/// searches for the main-text part
/// and returns that as parser.html
pub async fn from_bytes(context: &Context, rawmime: &[u8]) -> Result<Self> {
pub async fn from_bytes(rawmime: &[u8]) -> Result<Self> {
let mut parser = HtmlMsgParser {
html: "".to_string(),
plain: None,
@@ -102,7 +103,7 @@ impl HtmlMsgParser {
parser.html = plain.to_html();
}
} else {
parser.cid_to_data_recursive(context, &parsedmail).await?;
parser.cid_to_data_recursive(&parsedmail).await?;
}
Ok(parser)
@@ -173,7 +174,6 @@ impl HtmlMsgParser {
/// This allows the final html-file to be self-contained.
fn cid_to_data_recursive<'a>(
&'a mut self,
context: &'a Context,
mail: &'a mailparse::ParsedMail<'a>,
) -> Pin<Box<dyn Future<Output = Result<()>> + 'a + Send>> {
// Boxed future to deal with recursion
@@ -181,7 +181,7 @@ impl HtmlMsgParser {
match get_mime_multipart_type(&mail.ctype) {
MimeMultipartType::Multiple => {
for cur_data in &mail.subparts {
self.cid_to_data_recursive(context, cur_data).await?;
self.cid_to_data_recursive(cur_data).await?;
}
Ok(())
}
@@ -191,7 +191,7 @@ impl HtmlMsgParser {
return Ok(());
}
let mail = mailparse::parse_mail(&raw).context("failed to parse mail")?;
self.cid_to_data_recursive(context, &mail).await
self.cid_to_data_recursive(&mail).await
}
MimeMultipartType::Single => {
let mimetype = mail.ctype.mimetype.parse::<Mime>()?;
@@ -214,10 +214,8 @@ impl HtmlMsgParser {
.to_string()
}
Err(e) => warn!(
context,
"Cannot create regex for cid: {} throws {}",
re_string,
e
re_string, e
),
}
}
@@ -249,15 +247,15 @@ impl MsgId {
let rawmime = message::get_mime_headers(context, self).await?;
if !rawmime.is_empty() {
match HtmlMsgParser::from_bytes(context, &rawmime).await {
match HtmlMsgParser::from_bytes(&rawmime).await {
Err(err) => {
warn!(context, "get_html: parser error: {:#}", err);
warn!("get_html: parser error: {:#}", err);
Ok(None)
}
Ok(parser) => Ok(Some(parser.html)),
}
} else {
warn!(context, "get_html: no mime for {}", self);
warn!("get_html: no mime for {}", self);
Ok(None)
}
}
@@ -286,9 +284,8 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_plain_unspecified() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_plain_unspecified.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert_eq!(
parser.html,
r##"<!DOCTYPE html>
@@ -305,9 +302,8 @@ This message does not have Content-Type nor Subject.<br/>
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_plain_iso88591() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_plain_iso88591.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert_eq!(
parser.html,
r##"<!DOCTYPE html>
@@ -324,9 +320,8 @@ message with a non-UTF-8 encoding: äöüßÄÖÜ<br/>
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_plain_flowed() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_plain_flowed.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert!(parser.plain.unwrap().flowed);
assert_eq!(
parser.html,
@@ -347,9 +342,8 @@ and will be wrapped as usual.<br/>
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_alt_plain() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_alt_plain.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert_eq!(
parser.html,
r##"<!DOCTYPE html>
@@ -369,9 +363,8 @@ test some special html-characters as &lt; &gt; and &amp; but also &quot; and &#x
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_html() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_html.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
// on windows, `\r\n` linends are returned from mimeparser,
// however, rust multiline-strings use just `\n`;
@@ -387,9 +380,8 @@ test some special html-characters as &lt; &gt; and &amp; but also &quot; and &#x
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_alt_html() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_alt_html.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert_eq!(
parser.html.replace('\r', ""), // see comment in test_htmlparse_html()
r##"<html>
@@ -402,9 +394,8 @@ test some special html-characters as &lt; &gt; and &amp; but also &quot; and &#x
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_htmlparse_alt_plain_html() {
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/text_alt_plain_html.eml");
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert_eq!(
parser.html.replace('\r', ""), // see comment in test_htmlparse_html()
r##"<html>
@@ -421,7 +412,6 @@ test some special html-characters as &lt; &gt; and &amp; but also &quot; and &#x
async fn test_htmlparse_apple_cid_jpg() {
// load raw mime html-data with related image-part (cid:)
// and make sure, Content-Id has angle-brackets that are removed correctly.
let t = TestContext::new().await;
let raw = include_bytes!("../test-data/message/apple_cid_jpg.eml");
let test = String::from_utf8_lossy(raw);
assert!(test.contains("Content-Id: <8AE052EF-BC90-486F-BB78-58D3590308EC@fritz.box>"));
@@ -429,7 +419,7 @@ test some special html-characters as &lt; &gt; and &amp; but also &quot; and &#x
assert!(test.find("data:").is_none());
// parsing converts cid: to data:
let parser = HtmlMsgParser::from_bytes(&t.ctx, raw).await.unwrap();
let parser = HtmlMsgParser::from_bytes(raw).await.unwrap();
assert!(parser.html.contains("<html>"));
assert!(!parser.html.contains("Content-Id:"));
assert!(parser.html.contains("data:image/jpeg;base64,/9j/4AAQ"));

View File

@@ -16,6 +16,7 @@ use async_channel::Receiver;
use async_imap::types::{Fetch, Flag, Name, NameAttribute, UnsolicitedResponse};
use futures::StreamExt;
use num_traits::FromPrimitive;
use tracing::{error, info, warn};
use crate::chat::{self, ChatId, ChatIdBlocked};
use crate::config::Config;
@@ -306,7 +307,7 @@ impl Imap {
let oauth2 = self.config.lp.oauth2;
info!(context, "Connecting to IMAP server");
info!("Connecting to IMAP server");
let connection_res: Result<Client> = if self.config.lp.security == Socket::Starttls
|| self.config.lp.security == Socket::Plain
{
@@ -363,7 +364,7 @@ impl Imap {
let imap_pw: &str = config.lp.password.as_ref();
let login_res = if oauth2 {
info!(context, "Logging into IMAP server with OAuth 2");
info!("Logging into IMAP server with OAuth 2");
let addr: &str = config.addr.as_ref();
let token = get_oauth2_access_token(context, addr, imap_pw, true)
@@ -375,7 +376,7 @@ impl Imap {
};
client.authenticate("XOAUTH2", auth).await
} else {
info!(context, "Logging into IMAP server with LOGIN");
info!("Logging into IMAP server with LOGIN");
client.login(imap_user, imap_pw).await
};
@@ -391,7 +392,7 @@ impl Imap {
"IMAP-LOGIN as {}",
self.config.lp.user
)));
info!(context, "Successfully logged into IMAP server");
info!("Successfully logged into IMAP server");
Ok(())
}
@@ -399,7 +400,7 @@ impl Imap {
let imap_user = self.config.lp.user.to_owned();
let message = stock_str::cannot_login(context, &imap_user).await;
warn!(context, "{} ({:#})", message, err);
warn!("{} ({:#})", message, err);
let lock = context.wrong_pw_warning_mutex.lock().await;
if self.login_failed_once
@@ -407,7 +408,7 @@ impl Imap {
&& context.get_config_bool(Config::NotifyAboutWrongPw).await?
{
if let Err(e) = context.set_config(Config::NotifyAboutWrongPw, None).await {
warn!(context, "{:#}", e);
warn!("{:#}", e);
}
drop(lock);
@@ -417,7 +418,7 @@ impl Imap {
chat::add_device_msg_with_importance(context, None, Some(&mut msg), true)
.await
{
warn!(context, "{:#}", e);
warn!("{:#}", e);
}
} else {
self.login_failed_once = true;
@@ -445,8 +446,8 @@ impl Imap {
/// Drops the session without disconnecting properly.
/// Useful in case of an IMAP error, when it's unclear if it's in a correct state and it's
/// easier to setup a new connection.
pub fn trigger_reconnect(&mut self, context: &Context) {
info!(context, "Dropping an IMAP connection.");
pub fn trigger_reconnect(&mut self) {
info!("Dropping an IMAP connection.");
self.session = None;
}
@@ -475,7 +476,7 @@ impl Imap {
// Note that the `Config::DeleteDeviceAfter` timer starts as soon as the messages are
// fetched while the per-chat ephemeral timers start as soon as the messages are marked
// as noticed.
context.interrupt_ephemeral_task().await;
context.scheduler.interrupt_ephemeral_task().await;
}
let session = self
@@ -510,7 +511,7 @@ impl Imap {
.as_mut()
.context("IMAP No connection established")?;
session.select_folder(context, Some(folder)).await?;
session.select_folder(Some(folder)).await?;
let mut list = session
.uid_fetch("1:*", RFC724MID_UID)
@@ -521,7 +522,7 @@ impl Imap {
let headers = match get_fetch_headers(&fetch) {
Ok(headers) => headers,
Err(err) => {
warn!(context, "Failed to parse FETCH headers: {}", err);
warn!("Failed to parse FETCH headers: {}", err);
continue;
}
};
@@ -539,7 +540,6 @@ impl Imap {
}
info!(
context,
"Resync: collected {} message IDs in folder {}",
msgs.len(),
folder,
@@ -582,7 +582,7 @@ impl Imap {
) -> Result<bool> {
let session = self.session.as_mut().context("no session")?;
let newly_selected = session
.select_or_create_folder(context, folder)
.select_or_create_folder(folder)
.await
.with_context(|| format!("failed to select or create folder {folder}"))?;
let mailbox = session
@@ -611,7 +611,6 @@ impl Imap {
} else if let Some(uid_next) = mailbox.uid_next {
if uid_next < old_uid_next {
warn!(
context,
"The server illegally decreased the uid_next of folder {} from {} to {} without changing validity ({}), resyncing UIDs...",
folder, old_uid_next, uid_next, new_uid_validity,
);
@@ -629,7 +628,7 @@ impl Imap {
set_modseq(context, folder, 0).await?;
if mailbox.exists == 0 {
info!(context, "Folder \"{}\" is empty.", folder);
info!("Folder \"{}\" is empty.", folder);
// set uid_next=1 for empty folders.
// If we do not do this here, we'll miss the first message
@@ -645,10 +644,7 @@ impl Imap {
let new_uid_next = match mailbox.uid_next {
Some(uid_next) => uid_next,
None => {
warn!(
context,
"IMAP folder has no uid_next, fall back to fetching"
);
warn!("IMAP folder has no uid_next, fall back to fetching");
// note that we use fetch by sequence number
// and thus we only need to get exactly the
// last-index message.
@@ -685,13 +681,8 @@ impl Imap {
job::schedule_resync(context).await?;
}
info!(
context,
"uid/validity change folder {}: new {}/{} previous {}/{}",
folder,
new_uid_next,
new_uid_validity,
old_uid_next,
old_uid_validity,
folder, new_uid_next, new_uid_validity, old_uid_next, old_uid_validity,
);
Ok(false)
}
@@ -707,7 +698,7 @@ impl Imap {
fetch_existing_msgs: bool,
) -> Result<bool> {
if should_ignore_folder(context, folder, folder_meaning).await? {
info!(context, "Not fetching from {}", folder);
info!("Not fetching from {}", folder);
return Ok(false);
}
@@ -717,7 +708,7 @@ impl Imap {
.with_context(|| format!("failed to select folder {folder}"))?;
if !new_emails && !fetch_existing_msgs {
info!(context, "No new emails in folder {}", folder);
info!("No new emails in folder {}", folder);
return Ok(false);
}
@@ -743,7 +734,7 @@ impl Imap {
let headers = match get_fetch_headers(fetch_response) {
Ok(headers) => headers,
Err(err) => {
warn!(context, "Failed to parse FETCH headers: {}", err);
warn!("Failed to parse FETCH headers: {}", err);
continue;
}
};
@@ -843,7 +834,7 @@ impl Imap {
set_uid_next(context, folder, new_uid_next).await?;
}
info!(context, "{} mails read from \"{}\".", read_cnt, folder);
info!("{} mails read from \"{}\".", read_cnt, folder);
let msg_ids: Vec<MsgId> = received_msgs
.iter()
@@ -890,10 +881,7 @@ impl Imap {
None => continue,
};
if let Some(folder) = context.get_config(config).await? {
info!(
context,
"Fetching existing messages from folder \"{}\"", folder
);
info!("Fetching existing messages from folder \"{}\"", folder);
self.fetch_new_messages(context, &folder, meaning, true)
.await
.context("could not fetch existing messages")?;
@@ -901,7 +889,7 @@ impl Imap {
}
}
info!(context, "Done fetching existing messages.");
info!("Done fetching existing messages.");
Ok(())
}
@@ -986,21 +974,15 @@ impl Session {
Err(err) => {
if context.should_delete_to_trash().await? {
error!(
context,
"Cannot move messages {} to {}, no fallback to COPY/DELETE because \
delete_to_trash is set. Error: {:#}",
set,
target,
err,
set, target, err,
);
return Err(err.into());
}
warn!(
context,
"Cannot move messages, fallback to COPY/DELETE {} to {}: {}",
set,
target,
err
set, target, err
);
}
}
@@ -1011,14 +993,14 @@ impl Session {
let copy = !context.is_trash(target).await?;
if copy {
info!(
context,
"Server does not support MOVE, fallback to COPY/DELETE {} to {}", set, target
"Server does not support MOVE, fallback to COPY/DELETE {} to {}",
set, target
);
self.uid_copy(&set, &target).await?;
} else {
error!(
context,
"Server does not support MOVE, fallback to DELETE {} to {}", set, target,
"Server does not support MOVE, fallback to DELETE {} to {}",
set, target,
);
}
context
@@ -1067,7 +1049,7 @@ impl Session {
// MOVE/DELETE operations. This does not result in multiple SELECT commands
// being sent because `select_folder()` does nothing if the folder is already
// selected.
self.select_folder(context, Some(folder)).await?;
self.select_folder(Some(folder)).await?;
// Empty target folder name means messages should be deleted.
if target.is_empty() {
@@ -1088,8 +1070,8 @@ impl Session {
// Expunge folder if needed, e.g. if some jobs have
// deleted messages on the server.
if let Err(err) = self.maybe_close_folder(context).await {
warn!(context, "failed to close folder: {:?}", err);
if let Err(err) = self.maybe_close_folder().await {
warn!("failed to close folder: {:?}", err);
}
Ok(())
@@ -1115,23 +1097,17 @@ impl Session {
.await?;
for (folder, rowid_set, uid_set) in UidGrouper::from(rows) {
self.select_folder(context, Some(&folder))
self.select_folder(Some(&folder))
.await
.context("failed to select folder")?;
if let Err(err) = self.add_flag_finalized_with_set(&uid_set, "\\Seen").await {
warn!(
context,
"Cannot mark messages {} in folder {} as seen, will retry later: {}.",
uid_set,
folder,
err
uid_set, folder, err
);
} else {
info!(
context,
"Marked messages {} in folder {} as seen.", uid_set, folder
);
info!("Marked messages {} in folder {} as seen.", uid_set, folder);
context
.sql
.execute(
@@ -1159,15 +1135,12 @@ impl Imap {
.with_context(|| format!("No IMAP connection established, folder: {folder}"))?;
if !session.can_condstore() {
info!(
context,
"Server does not support CONDSTORE, skipping flag synchronization."
);
info!("Server does not support CONDSTORE, skipping flag synchronization.");
return Ok(());
}
session
.select_folder(context, Some(folder))
.select_folder(Some(folder))
.await
.context("failed to select folder")?;
@@ -1180,8 +1153,8 @@ impl Imap {
// We are not interested in actual value of HIGHESTMODSEQ.
if mailbox.highest_modseq.is_none() {
info!(
context,
"Mailbox {} does not support mod-sequences, skipping flag synchronization.", folder
"Mailbox {} does not support mod-sequences, skipping flag synchronization.",
folder
);
return Ok(());
}
@@ -1203,7 +1176,7 @@ impl Imap {
let uid = if let Some(uid) = fetch.uid {
uid
} else {
info!(context, "FETCH result contains no UID, skipping");
info!("FETCH result contains no UID, skipping");
continue;
};
let is_seen = fetch.flags().any(|flag| flag == Flag::Seen);
@@ -1223,7 +1196,7 @@ impl Imap {
highest_modseq = modseq;
}
} else {
warn!(context, "FETCH result contains no MODSEQ");
warn!("FETCH result contains no MODSEQ");
}
}
@@ -1269,7 +1242,7 @@ impl Imap {
}
}
Err(err) => {
warn!(context, "{}", err);
warn!("{}", err);
continue;
}
};
@@ -1366,7 +1339,6 @@ impl Imap {
let session = self.session.as_mut().context("no IMAP session")?;
for (request_uids, set) in build_sequence_sets(&request_uids)? {
info!(
context,
"Starting a {} FETCH of message set \"{}\".",
if fetch_partially { "partial" } else { "full" },
set
@@ -1407,7 +1379,7 @@ impl Imap {
let next_fetch_response = match next_fetch_response {
Ok(next_fetch_response) => next_fetch_response,
Err(err) => {
warn!(context, "Failed to process IMAP FETCH result: {}.", err);
warn!("Failed to process IMAP FETCH result: {}.", err);
continue;
}
};
@@ -1423,24 +1395,21 @@ impl Imap {
// another client changes \Seen flag on a message after we do a prefetch but
// before fetch. It's not an error if we receive such unsolicited response.
info!(
context,
"Skipping not requested FETCH response for UID {}.", next_uid
"Skipping not requested FETCH response for UID {}.",
next_uid
);
} else if uid_msgs.insert(next_uid, next_fetch_response).is_some() {
warn!(context, "Got duplicated UID {}.", next_uid);
warn!("Got duplicated UID {}.", next_uid);
}
} else {
info!(context, "Skipping FETCH response without UID.");
info!("Skipping FETCH response without UID.");
}
}
let fetch_response = match fetch_response {
Some(fetch) => fetch,
None => {
warn!(
context,
"Missed UID {} in the server response.", request_uid
);
warn!("Missed UID {} in the server response.", request_uid);
continue;
}
};
@@ -1454,7 +1423,7 @@ impl Imap {
};
if is_deleted {
info!(context, "Not processing deleted msg {}.", request_uid);
info!("Not processing deleted msg {}.", request_uid);
last_uid = Some(request_uid);
continue;
}
@@ -1462,10 +1431,7 @@ impl Imap {
let body = if let Some(body) = body {
body
} else {
info!(
context,
"Not processing message {} without a BODY.", request_uid
);
info!("Not processing message {} without a BODY.", request_uid);
last_uid = Some(request_uid);
continue;
};
@@ -1476,17 +1442,13 @@ impl Imap {
rfc724_mid
} else {
error!(
context,
"No Message-ID corresponding to UID {} passed in uid_messsage_ids.",
request_uid
);
continue;
};
info!(
context,
"Passing message UID {} to receive_imf().", request_uid
);
info!("Passing message UID {} to receive_imf().", request_uid);
match receive_imf_inner(
context,
rfc724_mid,
@@ -1503,7 +1465,7 @@ impl Imap {
}
}
Err(err) => {
warn!(context, "receive_imf error: {:#}.", err);
warn!("receive_imf error: {:#}.", err);
}
};
last_uid = Some(request_uid)
@@ -1515,18 +1477,13 @@ impl Imap {
if count != request_uids.len() {
warn!(
context,
"Failed to fetch all UIDs: got {}, requested {}, we requested the UIDs {:?}.",
count,
request_uids.len(),
request_uids,
);
} else {
info!(
context,
"Successfully received {} UIDs.",
request_uids.len()
);
info!("Successfully received {} UIDs.", request_uids.len());
}
}
@@ -1567,7 +1524,7 @@ impl Imap {
return Some(ImapActionResult::RetryLater);
}
if let Err(err) = self.prepare(context).await {
warn!(context, "prepare_imap_op failed: {}", err);
warn!("prepare_imap_op failed: {}", err);
return Some(ImapActionResult::RetryLater);
}
@@ -1577,24 +1534,24 @@ impl Imap {
.context("no IMAP connection established")
{
Err(err) => {
error!(context, "Failed to prepare IMAP operation: {:#}", err);
error!("Failed to prepare IMAP operation: {:#}", err);
return Some(ImapActionResult::Failed);
}
Ok(session) => session,
};
match session.select_folder(context, Some(folder)).await {
match session.select_folder(Some(folder)).await {
Ok(_) => None,
Err(select_folder::Error::ConnectionLost) => {
warn!(context, "Lost imap connection");
warn!("Lost imap connection");
Some(ImapActionResult::RetryLater)
}
Err(select_folder::Error::BadFolderName(folder_name)) => {
warn!(context, "invalid folder name: {:?}", folder_name);
warn!("invalid folder name: {:?}", folder_name);
Some(ImapActionResult::Failed)
}
Err(err) => {
warn!(context, "failed to select folder {:?}: {:#}", folder, err);
warn!("failed to select folder {:?}: {:#}", folder, err);
Some(ImapActionResult::RetryLater)
}
}
@@ -1623,7 +1580,6 @@ impl Imap {
/// Returns first found or created folder name.
async fn configure_mvbox<'a>(
&mut self,
context: &Context,
folders: &[&'a str],
create_mvbox: bool,
) -> Result<Option<&'a str>> {
@@ -1634,15 +1590,15 @@ impl Imap {
// Close currently selected folder if needed.
// We are going to select folders using low-level EXAMINE operations below.
session.select_folder(context, None).await?;
session.select_folder(None).await?;
for folder in folders {
info!(context, "Looking for MVBOX-folder \"{}\"...", &folder);
info!("Looking for MVBOX-folder \"{}\"...", &folder);
let res = session.examine(&folder).await;
if res.is_ok() {
info!(
context,
"MVBOX-folder {:?} successfully selected, using it.", &folder
"MVBOX-folder {:?} successfully selected, using it.",
&folder
);
session.close().await?;
return Ok(Some(folder));
@@ -1653,11 +1609,11 @@ impl Imap {
for folder in folders {
match session.create(&folder).await {
Ok(_) => {
info!(context, "MVBOX-folder {} created.", &folder);
info!("MVBOX-folder {} created.", &folder);
return Ok(Some(folder));
}
Err(err) => {
warn!(context, "Cannot create MVBOX-folder {:?}: {}", &folder, err);
warn!("Cannot create MVBOX-folder {:?}: {}", &folder, err);
}
}
}
@@ -1682,7 +1638,7 @@ impl Imap {
while let Some(folder) = folders.next().await {
let folder = folder?;
info!(context, "Scanning folder: {:?}", folder);
info!("Scanning folder: {:?}", folder);
// Update the delimiter iff there is a different one, but only once.
if let Some(d) = folder.delimiter() {
@@ -1706,11 +1662,11 @@ impl Imap {
}
drop(folders);
info!(context, "Using \"{}\" as folder-delimiter.", delimiter);
info!("Using \"{}\" as folder-delimiter.", delimiter);
let fallback_folder = format!("INBOX{delimiter}DeltaChat");
let mvbox_folder = self
.configure_mvbox(context, &["DeltaChat", &fallback_folder], create_mvbox)
.configure_mvbox(&["DeltaChat", &fallback_folder], create_mvbox)
.await
.context("failed to configure mvbox")?;
@@ -1718,7 +1674,7 @@ impl Imap {
.set_config(Config::ConfiguredInboxFolder, Some("INBOX"))
.await?;
if let Some(mvbox_folder) = mvbox_folder {
info!(context, "Setting MVBOX FOLDER TO {}", &mvbox_folder);
info!("Setting MVBOX FOLDER TO {}", &mvbox_folder);
context
.set_config(Config::ConfiguredMvboxFolder, Some(mvbox_folder))
.await?;
@@ -1731,7 +1687,7 @@ impl Imap {
.set_raw_config_int("folders_configured", DC_FOLDERS_CONFIGURED_VERSION)
.await?;
info!(context, "FINISHED configuring IMAP-folders.");
info!("FINISHED configuring IMAP-folders.");
Ok(())
}
}
@@ -1741,18 +1697,15 @@ impl Session {
/// Drains all responses from `session.unsolicited_responses` in the process.
/// If this returns `true`, this means that new emails arrived and you should
/// fetch again, even if you just fetched.
fn server_sent_unsolicited_exists(&self, context: &Context) -> Result<bool> {
fn server_sent_unsolicited_exists(&self) -> Result<bool> {
let mut unsolicited_exists = false;
while let Ok(response) = self.unsolicited_responses.try_recv() {
match response {
UnsolicitedResponse::Exists(_) => {
info!(
context,
"Need to fetch again, got unsolicited EXISTS {:?}", response
);
info!("Need to fetch again, got unsolicited EXISTS {:?}", response);
unsolicited_exists = true;
}
_ => info!(context, "ignoring unsolicited response {:?}", response),
_ => info!("ignoring unsolicited response {:?}", response),
}
}
Ok(unsolicited_exists)
@@ -1796,8 +1749,8 @@ async fn should_move_out_of_spam(
Some(res) => res,
None => {
warn!(
context,
"Contact with From address {:?} cannot exist, not moving out of spam", from
"Contact with From address {:?} cannot exist, not moving out of spam",
from
);
return Ok(false);
}
@@ -2120,7 +2073,7 @@ pub(crate) async fn prefetch_should_download(
// (prevent_rename is the last argument of from_field_to_contact_id())
if flags.any(|f| f == Flag::Draft) {
info!(context, "Ignoring draft message");
info!("Ignoring draft message");
return Ok(false);
}
@@ -2224,7 +2177,10 @@ pub(crate) async fn markseen_on_imap_table(context: &Context, message_id: &str)
paramsv![message_id],
)
.await?;
context.interrupt_inbox(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
Ok(())
}
@@ -2422,8 +2378,8 @@ async fn add_all_recipients_as_contacts(
m
} else {
info!(
context,
"Folder {} is not configured, skipping fetching contacts from it.", folder
"Folder {} is not configured, skipping fetching contacts from it.",
folder
);
return Ok(());
};
@@ -2447,10 +2403,8 @@ async fn add_all_recipients_as_contacts(
let recipient_addr = match ContactAddress::new(&recipient.addr) {
Err(err) => {
warn!(
context,
"Could not add contact for recipient with address {:?}: {:#}",
recipient.addr,
err
recipient.addr, err
);
continue;
}

View File

@@ -4,6 +4,7 @@ use anyhow::{bail, Context as _, Result};
use async_channel::Receiver;
use async_imap::extensions::idle::IdleResponse;
use futures_lite::FutureExt;
use tracing::{error, info, warn};
use super::session::Session;
use super::Imap;
@@ -15,7 +16,6 @@ const IDLE_TIMEOUT: Duration = Duration::from_secs(23 * 60);
impl Session {
pub async fn idle(
mut self,
context: &Context,
idle_interrupt_receiver: Receiver<InterruptInfo>,
watch_folder: Option<String>,
) -> Result<(Self, InterruptInfo)> {
@@ -27,14 +27,14 @@ impl Session {
let mut info = Default::default();
self.select_folder(context, watch_folder.as_deref()).await?;
self.select_folder(watch_folder.as_deref()).await?;
if self.server_sent_unsolicited_exists(context)? {
if self.server_sent_unsolicited_exists()? {
return Ok((self, info));
}
if let Ok(info) = idle_interrupt_receiver.try_recv() {
info!(context, "skip idle, got interrupt {:?}", info);
info!("skip idle, got interrupt {:?}", info);
return Ok((self, info));
}
@@ -56,10 +56,7 @@ impl Session {
}
let folder_name = watch_folder.as_deref().unwrap_or("None");
info!(
context,
"{}: Idle entering wait-on-remote state", folder_name
);
info!("{}: Idle entering wait-on-remote state", folder_name);
let fut = idle_wait.map(|ev| ev.map(Event::IdleResponse)).race(async {
let info = idle_interrupt_receiver.recv().await;
@@ -71,29 +68,20 @@ impl Session {
match fut.await {
Ok(Event::IdleResponse(IdleResponse::NewData(x))) => {
info!(context, "{}: Idle has NewData {:?}", folder_name, x);
info!("{}: Idle has NewData {:?}", folder_name, x);
}
Ok(Event::IdleResponse(IdleResponse::Timeout)) => {
info!(
context,
"{}: Idle-wait timeout or interruption", folder_name
);
info!("{}: Idle-wait timeout or interruption", folder_name);
}
Ok(Event::IdleResponse(IdleResponse::ManualInterrupt)) => {
info!(
context,
"{}: Idle wait was interrupted manually", folder_name
);
info!("{}: Idle wait was interrupted manually", folder_name);
}
Ok(Event::Interrupt(i)) => {
info!(
context,
"{}: Idle wait was interrupted: {:?}", folder_name, &i
);
info!("{}: Idle wait was interrupted: {:?}", folder_name, &i);
info = i;
}
Err(err) => {
warn!(context, "{}: Idle wait errored: {:?}", folder_name, err);
warn!("{}: Idle wait errored: {:?}", folder_name, err);
}
}
@@ -124,14 +112,14 @@ impl Imap {
let watch_folder = if let Some(watch_folder) = watch_folder {
watch_folder
} else {
info!(context, "IMAP-fake-IDLE: no folder, waiting for interrupt");
info!("IMAP-fake-IDLE: no folder, waiting for interrupt");
return self
.idle_interrupt_receiver
.recv()
.await
.unwrap_or_default();
};
info!(context, "IMAP-fake-IDLEing folder={:?}", watch_folder);
info!("IMAP-fake-IDLEing folder={:?}", watch_folder);
// check every minute if there are new messages
// TODO: grow sleep durations / make them more flexible
@@ -159,7 +147,7 @@ impl Imap {
// (setup_handle_if_needed might not know about them if we
// never successfully connected)
if let Err(err) = self.prepare(context).await {
warn!(context, "fake_idle: could not connect: {}", err);
warn!("fake_idle: could not connect: {}", err);
continue;
}
if let Some(session) = &self.session {
@@ -168,7 +156,7 @@ impl Imap {
break InterruptInfo::new(false);
}
}
info!(context, "fake_idle is connected");
info!("fake_idle is connected");
// we are connected, let's see if fetching messages results
// in anything. If so, we behave as if IDLE had data but
// will have already fetched the messages so perform_*_fetch
@@ -178,27 +166,26 @@ impl Imap {
.await
{
Ok(res) => {
info!(context, "fetch_new_messages returned {:?}", res);
info!("fetch_new_messages returned {:?}", res);
if res {
break InterruptInfo::new(false);
}
}
Err(err) => {
error!(context, "could not fetch from folder: {:#}", err);
self.trigger_reconnect(context);
error!("could not fetch from folder: {:#}", err);
self.trigger_reconnect();
}
}
}
Event::Interrupt(info) => {
// Interrupt
info!(context, "Fake IDLE interrupted");
info!("Fake IDLE interrupted");
break info;
}
}
};
info!(
context,
"IMAP-fake-IDLE done after {:.4}s",
SystemTime::now()
.duration_since(fake_idle_start_time)

View File

@@ -2,6 +2,7 @@ use std::{collections::BTreeMap, time::Instant};
use anyhow::{Context as _, Result};
use futures::stream::StreamExt;
use tracing::info;
use super::{get_folder_meaning_by_attrs, get_folder_meaning_by_name};
use crate::config::Config;
@@ -24,7 +25,7 @@ impl Imap {
return Ok(false);
}
}
info!(context, "Starting full folder scan");
info!("Starting full folder scan");
self.prepare(context).await?;
let folders = self.list_folders(context).await?;
@@ -65,7 +66,7 @@ impl Imap {
{
let session = self.session.as_mut().context("no session")?;
// Drain leftover unsolicited EXISTS messages
session.server_sent_unsolicited_exists(context)?;
session.server_sent_unsolicited_exists()?;
loop {
self.fetch_move_delete(context, folder.name(), folder_meaning)
@@ -74,7 +75,7 @@ impl Imap {
let session = self.session.as_mut().context("no session")?;
// If the server sent an unsolicited EXISTS during the fetch, we need to fetch again
if !session.server_sent_unsolicited_exists(context)? {
if !session.server_sent_unsolicited_exists()? {
break;
}
}

View File

@@ -1,9 +1,9 @@
//! # IMAP folder selection module.
use anyhow::Context as _;
use tracing::info;
use super::session::Session as ImapSession;
use crate::context::Context;
type Result<T> = std::result::Result<T, Error>;
@@ -34,13 +34,13 @@ impl ImapSession {
///
/// CLOSE is considerably faster than an EXPUNGE, see
/// <https://tools.ietf.org/html/rfc3501#section-6.4.2>
pub(super) async fn maybe_close_folder(&mut self, context: &Context) -> anyhow::Result<()> {
pub(super) async fn maybe_close_folder(&mut self) -> anyhow::Result<()> {
if let Some(folder) = &self.selected_folder {
if self.selected_folder_needs_expunge {
info!(context, "Expunge messages in \"{}\".", folder);
info!("Expunge messages in \"{}\".", folder);
self.close().await.context("IMAP close/expunge failed")?;
info!(context, "close/expunge succeeded");
info!("close/expunge succeeded");
self.selected_folder = None;
self.selected_folder_needs_expunge = false;
}
@@ -51,11 +51,7 @@ impl ImapSession {
/// Selects a folder, possibly updating uid_validity and, if needed,
/// expunging the folder to remove delete-marked messages.
/// Returns whether a new folder was selected.
pub(super) async fn select_folder(
&mut self,
context: &Context,
folder: Option<&str>,
) -> Result<NewlySelected> {
pub(super) async fn select_folder(&mut self, folder: Option<&str>) -> Result<NewlySelected> {
// if there is a new folder and the new folder is equal to the selected one, there's nothing to do.
// if there is _no_ new folder, we continue as we might want to expunge below.
if let Some(folder) = folder {
@@ -67,7 +63,7 @@ impl ImapSession {
}
// deselect existing folder, if needed (it's also done implicitly by SELECT, however, without EXPUNGE then)
self.maybe_close_folder(context).await?;
self.maybe_close_folder().await?;
// select new folder
if let Some(folder) = folder {
@@ -104,19 +100,18 @@ impl ImapSession {
/// Selects a folder. Tries to create it once and select again if the folder does not exist.
pub(super) async fn select_or_create_folder(
&mut self,
context: &Context,
folder: &str,
) -> anyhow::Result<NewlySelected> {
match self.select_folder(context, Some(folder)).await {
match self.select_folder( Some(folder)).await {
Ok(newly_selected) => Ok(newly_selected),
Err(err) => match err {
Error::NoFolder(..) => {
info!(context, "Failed to select folder {} because it does not exist, trying to create it.", folder);
info!( "Failed to select folder {} because it does not exist, trying to create it.", folder);
self.create(folder).await.with_context(|| {
format!("Couldn't select folder ('{err}'), then create() failed")
})?;
Ok(self.select_folder(context, Some(folder)).await.with_context(|| format!("failed to select newely created folder {folder}"))?)
Ok(self.select_folder(Some(folder)).await.with_context(|| format!("failed to select newely created folder {folder}"))?)
}
_ => Err(err).with_context(|| format!("failed to select folder {folder} with error other than NO, not trying to create it")),
},

View File

@@ -5,18 +5,20 @@ use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use ::pgp::types::KeyTrait;
use anyhow::{bail, ensure, format_err, Context as _, Error, Result};
use anyhow::{bail, ensure, format_err, Context as _, Result};
use futures::StreamExt;
use futures_lite::FutureExt;
use rand::{thread_rng, Rng};
use tokio::fs::{self, File};
use tokio_tar::Archive;
use tracing::{error, info, warn};
use crate::blob::BlobObject;
use crate::blob::{BlobDirContents, BlobObject};
use crate::chat::{self, delete_and_reset_all_device_msgs, ChatId};
use crate::config::Config;
use crate::contact::ContactId;
use crate::context::Context;
use crate::e2ee;
use crate::events::EventType;
use crate::key::{self, DcKey, DcSecretKey, SignedPublicKey, SignedSecretKey};
use crate::log::LogExt;
@@ -30,7 +32,10 @@ use crate::tools::{
create_folder, delete_file, get_filesuffix_lc, open_file_std, read_file, time, write_file,
EmailAddress,
};
use crate::{e2ee, tools};
mod transfer;
pub use transfer::{get_backup, BackupProvider};
// Name of the database file in the backup.
const DBFILE_BACKUP_NAME: &str = "dc_database_backup.sqlite";
@@ -86,21 +91,23 @@ pub async fn imex(
) -> Result<()> {
let cancel = context.alloc_ongoing().await?;
let res = imex_inner(context, what, path, passphrase)
.race(async {
cancel.recv().await.ok();
Err(format_err!("canceled"))
})
.await;
let res = {
let _guard = context.scheduler.pause(context.clone()).await;
imex_inner(context, what, path, passphrase)
.race(async {
cancel.recv().await.ok();
Err(format_err!("canceled"))
})
.await
};
context.free_ongoing().await;
if let Err(err) = res.as_ref() {
// We are using Anyhow's .context() and to show the inner error, too, we need the {:#}:
error!(context, "IMEX failed to complete: {:#}", err);
error!("IMEX failed to complete: {:#}", err);
context.emit_event(EventType::ImexProgress(0));
} else {
info!(context, "IMEX successfully completed");
info!("IMEX successfully completed");
context.emit_event(EventType::ImexProgress(1000));
}
@@ -108,7 +115,7 @@ pub async fn imex(
}
/// Returns the filename of the backup found (otherwise an error)
pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
pub async fn has_backup(dir_name: &Path) -> Result<String> {
let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
let mut newest_backup_name = "".to_string();
let mut newest_backup_path: Option<PathBuf> = None;
@@ -138,7 +145,7 @@ pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
///
/// Returns setup code.
pub async fn initiate_key_transfer(context: &Context) -> Result<String> {
let setup_code = create_setup_code(context);
let setup_code = create_setup_code();
/* this may require a keypair to be created. this may take a second ... */
let setup_file_content = render_setup_file(context, &setup_code).await?;
/* encrypting may also take a while ... */
@@ -220,7 +227,7 @@ pub async fn render_setup_file(context: &Context, passphrase: &str) -> Result<St
}
/// Creates a new setup code for Autocrypt Setup Message.
pub fn create_setup_code(_context: &Context) -> String {
pub fn create_setup_code() -> String {
let mut random_val: u16;
let mut rng = thread_rng();
let mut ret = String::new();
@@ -250,7 +257,7 @@ async fn maybe_add_bcc_self_device_msg(context: &Context) -> Result<()> {
msg.text = Some(
"It seems you are using multiple devices with Delta Chat. Great!\n\n\
If you also want to synchronize outgoing messages across all devices, \
go to the settings and enable \"Send copy to self\"."
go to \"Settings → Advanced\" and enable \"Send Copy to Self\"."
.to_string(),
);
chat::add_device_msg(context, Some("bcc-self-hint"), Some(&mut msg)).await?;
@@ -337,7 +344,7 @@ async fn set_self_key(
)
.await?;
info!(context, "stored self key: {:?}", keypair.secret.key_id());
info!("stored self key: {:?}", keypair.secret.key_id());
Ok(())
}
@@ -370,7 +377,7 @@ async fn imex_inner(
path: &Path,
passphrase: Option<String>,
) -> Result<()> {
info!(context, "Import/export dir: {}", path.display());
info!("Import/export dir: {}", path.display());
ensure!(context.sql.is_open().await, "Database not opened.");
context.emit_event(EventType::ImexProgress(10));
@@ -413,14 +420,13 @@ async fn import_backup(
"Cannot import backups to accounts in use."
);
ensure!(
context.scheduler.read().await.is_none(),
!context.scheduler.is_running().await,
"cannot import backup, IO is running"
);
let backup_file = File::open(backup_to_import).await?;
let file_size = backup_file.metadata().await?.len();
info!(
context,
"Import \"{}\" ({} bytes) to \"{}\".",
backup_to_import.display(),
file_size,
@@ -462,7 +468,7 @@ async fn import_backup(
if let Some(name) = from_path.file_name() {
fs::rename(&from_path, context.get_blobdir().join(name)).await?;
} else {
warn!(context, "No file name");
warn!("No file name.");
}
}
}
@@ -516,50 +522,16 @@ async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Res
let _d1 = DeleteOnDrop(temp_db_path.clone());
let _d2 = DeleteOnDrop(temp_path.clone());
context
.sql
.set_raw_config_int("backup_time", now as i32)
.await?;
sql::housekeeping(context).await.ok_or_log(context);
ensure!(
context.scheduler.read().await.is_none(),
"cannot export backup, IO is running"
);
export_database(context, &temp_db_path, passphrase)
.await
.context("could not export database")?;
info!(
context,
"Backup '{}' to '{}'.",
context.get_dbfile().display(),
dest_path.display(),
);
let path_str = temp_db_path
.to_str()
.with_context(|| format!("path {temp_db_path:?} is not valid unicode"))?;
context
.sql
.call_write(|conn| {
if let Err(err) = conn.execute("VACUUM", params![]) {
info!(context, "Vacuum failed, exporting anyway: {:#}.", err);
}
conn.execute(
"ATTACH DATABASE ? AS backup KEY ?",
paramsv![path_str, passphrase],
)
.context("failed to attach backup database")?;
let res = conn
.query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
.context("failed to export to attached backup database");
conn.execute("DETACH DATABASE backup", [])
.context("failed to detach backup database")?;
res?;
Ok::<_, Error>(())
})
.await?;
let res = export_backup_inner(context, &temp_db_path, &temp_path).await;
match &res {
@@ -568,7 +540,7 @@ async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Res
context.emit_event(EventType::ImexFileWritten(dest_path));
}
Err(e) => {
error!(context, "backup failed: {}", e);
error!("backup failed: {}", e);
}
}
@@ -597,29 +569,15 @@ async fn export_backup_inner(
.append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
.await?;
let read_dir = tools::read_dir(context.get_blobdir()).await?;
let count = read_dir.len();
let mut written_files = 0;
let blobdir = BlobDirContents::new(context).await?;
let mut last_progress = 0;
for entry in read_dir {
let name = entry.file_name();
if !entry.file_type().await?.is_file() {
warn!(
context,
"Export: Found dir entry {} that is not a file, ignoring",
name.to_string_lossy()
);
continue;
}
let mut file = File::open(entry.path()).await?;
let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(name);
builder.append_file(path_in_archive, &mut file).await?;
written_files += 1;
let progress = 1000 * written_files / count;
for (i, blob) in blobdir.iter().enumerate() {
let mut file = File::open(blob.to_abs_path()).await?;
let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
builder.append_file(path_in_archive, &mut file).await?;
let progress = 1000 * i / blobdir.len();
if progress != last_progress && progress > 10 && progress < 1000 {
// We already emitted ImexProgress(10) above
context.emit_event(EventType::ImexProgress(progress));
last_progress = progress;
}
@@ -654,7 +612,7 @@ async fn import_self_keys(context: &Context, dir: &Path) -> Result<()> {
continue;
}
set_default = if name_f.contains("legacy") {
info!(context, "found legacy key '{}'", path_plus_name.display());
info!("found legacy key '{}'", path_plus_name.display());
false
} else {
true
@@ -664,17 +622,13 @@ async fn import_self_keys(context: &Context, dir: &Path) -> Result<()> {
continue;
}
}
info!(
context,
"considering key file: {}",
path_plus_name.display()
);
info!("considering key file: {}", path_plus_name.display());
match read_file(context, &path_plus_name).await {
Ok(buf) => {
let armored = std::string::String::from_utf8_lossy(&buf);
if let Err(err) = set_self_key(context, &armored, set_default, false).await {
info!(context, "set_self_key: {}", err);
info!("set_self_key: {}", err);
continue;
}
}
@@ -719,7 +673,7 @@ async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
let id = Some(id).filter(|_| is_default != 0);
if let Ok(key) = public_key {
if let Err(err) = export_key_to_asc_file(context, dir, id, &key).await {
error!(context, "Failed to export public key: {:#}.", err);
error!("Failed to export public key: {:#}.", err);
export_errors += 1;
}
} else {
@@ -727,7 +681,7 @@ async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
}
if let Ok(key) = private_key {
if let Err(err) = export_key_to_asc_file(context, dir, id, &key).await {
error!(context, "Failed to export private key: {:#}.", err);
error!("Failed to export private key: {:#}.", err);
export_errors += 1;
}
} else {
@@ -764,7 +718,6 @@ where
dir.join(format!("{}-key-{}.asc", kind, &id))
};
info!(
context,
"Exporting key {:?} to {}",
key.key_id(),
file_name.display()
@@ -781,6 +734,48 @@ where
Ok(())
}
/// Exports the database to *dest*, encrypted using *passphrase*.
///
/// The directory of *dest* must already exist, if *dest* itself exists it will be
/// overwritten.
///
/// This also verifies that IO is not running during the export.
async fn export_database(context: &Context, dest: &Path, passphrase: String) -> Result<()> {
ensure!(
!context.scheduler.is_running().await,
"cannot export backup, IO is running"
);
let now = time().try_into().context("32-bit UNIX time overflow")?;
// TODO: Maybe introduce camino crate for UTF-8 paths where we need them.
let dest = dest
.to_str()
.with_context(|| format!("path {} is not valid unicode", dest.display()))?;
context.sql.set_raw_config_int("backup_time", now).await?;
sql::housekeeping(context).await.ok_or_log(context);
context
.sql
.call_write(|conn| {
conn.execute("VACUUM;", params![])
.map_err(|err| warn!("Vacuum failed, exporting anyway {err:#}."))
.ok();
conn.execute(
"ATTACH DATABASE ? AS backup KEY ?",
paramsv![dest, passphrase],
)
.context("failed to attach backup database")?;
let res = conn
.query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
.context("failed to export to attached backup database");
conn.execute("DETACH DATABASE backup", [])
.context("failed to detach backup database")?;
res?;
Ok(())
})
.await
}
#[cfg(test)]
mod tests {
use std::time::Duration;
@@ -823,8 +818,7 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_create_setup_code() {
let t = TestContext::new().await;
let setupcode = create_setup_code(&t);
let setupcode = create_setup_code();
assert_eq!(setupcode.len(), 44);
assert_eq!(setupcode.chars().nth(4).unwrap(), '-');
assert_eq!(setupcode.chars().nth(9).unwrap(), '-');
@@ -889,7 +883,7 @@ mod tests {
let context2 = TestContext::new().await;
assert!(!context2.is_configured().await?);
assert!(has_backup(&context2, backup_dir.path()).await.is_err());
assert!(has_backup(backup_dir.path()).await.is_err());
// export from context1
assert!(
@@ -903,7 +897,7 @@ mod tests {
.await;
// import to context2
let backup = has_backup(&context2, backup_dir.path()).await?;
let backup = has_backup(backup_dir.path()).await?;
// Import of unencrypted backup with incorrect "foobar" backup passphrase fails.
assert!(imex(
@@ -948,7 +942,7 @@ mod tests {
imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;
// import to context2
let backup = has_backup(&context2, backup_dir.path()).await?;
let backup = has_backup(backup_dir.path()).await?;
let context2_cloned = context2.clone();
let handle = task::spawn(async move {
imex(

708
src/imex/transfer.rs Normal file
View File

@@ -0,0 +1,708 @@
//! Transfer a backup to another device.
//!
//! This module provides support for using n0's iroh tool to initiate transfer of a backup
//! to another device using a QR code.
//!
//! Using the iroh terminology there are two parties to this:
//!
//! - The *Provider*, which starts a server and listens for connections.
//! - The *Getter*, which connects to the server and retrieves the data.
//!
//! Iroh is designed around the idea of verifying hashes, the downloads are verified as
//! they are retrieved. The entire transfer is initiated by requesting the data of a single
//! root hash.
//!
//! Both the provider and the getter are authenticated:
//!
//! - The provider is known by its *peer ID*.
//! - The provider needs an *authentication token* from the getter before it accepts a
//! connection.
//!
//! Both these are transferred in the QR code offered to the getter. This ensures that the
//! getter can not connect to an impersonated provider and the provider does not offer the
//! download to an impersonated getter.
use std::cmp::Ordering;
use std::future::Future;
use std::net::Ipv4Addr;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::task::Poll;
use anyhow::{anyhow, bail, ensure, format_err, Context as _, Result};
use async_channel::Receiver;
use futures_lite::StreamExt;
use iroh::get::{DataStream, Options};
use iroh::progress::ProgressEmitter;
use iroh::protocol::AuthToken;
use iroh::provider::{DataSource, Event, Provider, Ticket};
use iroh::Hash;
use tokio::fs::{self, File};
use tokio::io::{self, AsyncWriteExt, BufWriter};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::{broadcast, Mutex};
use tokio::task::{JoinHandle, JoinSet};
use tokio_stream::wrappers::ReadDirStream;
use tracing::{error, info, warn};
use crate::blob::BlobDirContents;
use crate::chat::delete_and_reset_all_device_msgs;
use crate::context::Context;
use crate::qr::Qr;
use crate::{e2ee, EventType};
use super::{export_database, DBFILE_BACKUP_NAME};
/// Provide or send a backup of this device.
///
/// This creates a backup of the current device and starts a service which offers another
/// device to download this backup.
///
/// This does not make a full backup on disk, only the SQLite database is created on disk,
/// the blobs in the blob directory are not copied.
///
/// This starts a task which acquires the global "ongoing" mutex. If you need to stop the
/// task use the [`Context::stop_ongoing`] mechanism.
///
/// The task implements [`Future`] and awaiting it will complete once a transfer has been
/// either completed or aborted.
#[derive(Debug)]
pub struct BackupProvider {
    /// The supervisor task, run by [`BackupProvider::watch_provider`].
    ///
    /// Awaiting [`BackupProvider`] delegates to this handle via its [`Future`] impl.
    handle: JoinHandle<Result<()>>,
    /// The ticket to retrieve the backup collection.
    ///
    /// Embedded into the QR code returned by [`BackupProvider::qr`].
    ticket: Ticket,
}
impl BackupProvider {
    /// Prepares for sending a backup to a second device.
    ///
    /// Before calling this function all I/O must be stopped so that no changes to the blobs
    /// or database are happening, this is done by calling the [`Accounts::stop_io`] or
    /// [`Context::stop_io`] APIs first.
    ///
    /// This will acquire the global "ongoing process" mutex, which can be used to cancel
    /// the process.
    ///
    /// [`Accounts::stop_io`]: crate::accounts::Accounts::stop_io
    pub async fn prepare(context: &Context) -> Result<Self> {
        e2ee::ensure_secret_key_exists(context)
            .await
            .context("Private key not available, aborting backup export")?;

        // Acquire global "ongoing" mutex.
        let cancel_token = context.alloc_ongoing().await?;
        // Keep the scheduler paused for the lifetime of the provider task so the
        // database and blobdir are not modified while they are being served.
        let paused_guard = context.scheduler.pause(context.clone()).await;
        let context_dir = context
            .get_blobdir()
            .parent()
            .ok_or(anyhow!("Context dir not found"))?;
        let dbfile = context_dir.join(DBFILE_BACKUP_NAME);
        // Remove a stale export left over from a previous, interrupted run.
        if fs::metadata(&dbfile).await.is_ok() {
            fs::remove_file(&dbfile).await?;
            warn!("Previous database export deleted.");
        }
        // The guard deletes the exported database file once dropped (in the task below).
        let dbfile = TempPathGuard::new(dbfile);
        let res = tokio::select! {
            biased;
            res = Self::prepare_inner(context, &dbfile) => {
                match res {
                    Ok(slf) => Ok(slf),
                    Err(err) => {
                        error!("Failed to set up second device setup: {err:#}.");
                        Err(err)
                    },
                }
            },
            _ = cancel_token.recv() => Err(format_err!("cancelled")),
        };
        let (provider, ticket) = match res {
            Ok((provider, ticket)) => (provider, ticket),
            Err(err) => {
                // Setup failed or was cancelled: release the "ongoing" mutex again.
                context.free_ongoing().await;
                return Err(err);
            }
        };
        let handle = {
            let context = context.clone();
            tokio::spawn(async move {
                let res = Self::watch_provider(&context, provider, cancel_token).await;
                context.free_ongoing().await;

                // Explicit drop to move the guards into this future
                drop(paused_guard);
                drop(dbfile);
                res
            })
        };
        Ok(Self { handle, ticket })
    }

    /// Creates the provider task.
    ///
    /// Having this as a function makes it easier to cancel it when needed.
    ///
    /// Returns the running iroh [`Provider`] together with the [`Ticket`] a second
    /// device needs in order to download the collection.
    async fn prepare_inner(context: &Context, dbfile: &Path) -> Result<(Provider, Ticket)> {
        // Generate the token up front: we also use it to encrypt the database.
        let token = AuthToken::generate();
        context.emit_event(SendProgress::Started.into());
        export_database(context, dbfile, token.to_string())
            .await
            .context("Database export failed")?;
        context.emit_event(SendProgress::DatabaseExported.into());

        // Now we can be sure IO is not running.
        let mut files = vec![DataSource::with_name(
            dbfile.to_owned(),
            format!("db/{DBFILE_BACKUP_NAME}"),
        )];
        let blobdir = BlobDirContents::new(context).await?;
        for blob in blobdir.iter() {
            let path = blob.to_abs_path();
            let name = format!("blob/{}", blob.as_file_name());
            files.push(DataSource::with_name(path, name));
        }

        // Start listening.
        let (db, hash) = iroh::provider::create_collection(files).await?;
        context.emit_event(SendProgress::CollectionCreated.into());
        let provider = Provider::builder(db)
            .bind_addr((Ipv4Addr::UNSPECIFIED, 0).into())
            .auth_token(token)
            .spawn()?;
        context.emit_event(SendProgress::ProviderListening.into());
        info!("Waiting for remote to connect.");
        let ticket = provider.ticket(hash);
        Ok((provider, ticket))
    }

    /// Supervises the iroh [`Provider`], terminating it when needed.
    ///
    /// This will watch the provider and terminate it when:
    ///
    /// - A transfer is completed, successful or unsuccessful.
    /// - An event could not be observed to protect against not knowing of a completed event.
    /// - The ongoing process is cancelled.
    ///
    /// The *cancel_token* is the handle for the ongoing process mutex, when this completes
    /// we must cancel this operation.
    async fn watch_provider(
        context: &Context,
        mut provider: Provider,
        cancel_token: Receiver<()>,
    ) -> Result<()> {
        let mut events = provider.subscribe();
        let mut total_size = 0;
        let mut current_size = 0;
        let res = loop {
            tokio::select! {
                biased;
                res = &mut provider => {
                    // The provider itself finished, e.g. after a shutdown() below.
                    break res.context("BackupProvider failed");
                },
                maybe_event = events.recv() => {
                    match maybe_event {
                        Ok(event) => {
                            match event {
                                Event::ClientConnected { ..} => {
                                    context.emit_event(SendProgress::ClientConnected.into());
                                }
                                Event::RequestReceived { .. } => {
                                    // Nothing to do; progress is tracked per blob below.
                                }
                                Event::TransferCollectionStarted { total_blobs_size, .. } => {
                                    total_size = total_blobs_size;
                                    context.emit_event(SendProgress::TransferInProgress {
                                        current_size,
                                        total_size,
                                    }.into());
                                }
                                Event::TransferBlobCompleted { size, .. } => {
                                    current_size += size;
                                    context.emit_event(SendProgress::TransferInProgress {
                                        current_size,
                                        total_size,
                                    }.into());
                                }
                                Event::TransferCollectionCompleted { .. } => {
                                    context.emit_event(SendProgress::TransferInProgress {
                                        current_size: total_size,
                                        total_size
                                    }.into());
                                    // Let the `res = &mut provider` branch above observe
                                    // the shutdown and break with the final result.
                                    provider.shutdown();
                                }
                                Event::TransferAborted { .. } => {
                                    provider.shutdown();
                                    break Err(anyhow!("BackupProvider transfer aborted"));
                                }
                            }
                        }
                        Err(broadcast::error::RecvError::Closed) => {
                            // We should never see this, provider.join() should complete
                            // first.
                        }
                        Err(broadcast::error::RecvError::Lagged(_)) => {
                            // We really shouldn't be lagging, if we did we may have missed
                            // a completion event.
                            provider.shutdown();
                            break Err(anyhow!("Missed events from BackupProvider"));
                        }
                    }
                },
                _ = cancel_token.recv() => {
                    provider.shutdown();
                    break Err(anyhow!("BackupSender cancelled"));
                },
            }
        };
        match &res {
            Ok(_) => context.emit_event(SendProgress::Completed.into()),
            Err(err) => {
                error!("Backup transfer failure: {err:#}.");
                context.emit_event(SendProgress::Failed.into())
            }
        }
        res
    }

    /// Returns a QR code that allows fetching this backup.
    ///
    /// This QR code can be passed to [`get_backup`] on a (different) device.
    pub fn qr(&self) -> Qr {
        Qr::Backup {
            ticket: self.ticket.clone(),
        }
    }
}
impl Future for BackupProvider {
    type Output = Result<()>;

    /// Resolves once the supervisor task has finished.
    fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
        // Delegate to the supervisor task's `JoinHandle`; a `JoinError`
        // (panic or abort) is surfaced as an `anyhow::Error`.
        match Pin::new(&mut self.handle).poll(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(joined) => Poll::Ready(match joined {
                Ok(result) => result,
                Err(join_err) => Err(join_err.into()),
            }),
        }
    }
}
/// A guard which will remove the path when dropped.
///
/// It implements [`Deref`] so it can be used as a `&Path`.
#[derive(Debug)]
struct TempPathGuard {
    path: PathBuf,
}

impl TempPathGuard {
    /// Takes ownership of *path*; the file is deleted when the guard is dropped.
    fn new(path: PathBuf) -> Self {
        Self { path }
    }
}

impl Drop for TempPathGuard {
    fn drop(&mut self) {
        let path = self.path.clone();
        // Deletion is best-effort and must not block Drop: run it as a task
        // and ignore failures (the file may already be gone).
        tokio::spawn(async move {
            fs::remove_file(&path).await.ok();
        });
    }
}

impl Deref for TempPathGuard {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        &self.path
    }
}
/// Create [`EventType::ImexProgress`] events using readable names.
///
/// Plus you get warnings if you don't use all variants.
#[derive(Debug)]
enum SendProgress {
    /// The transfer failed; maps to `ImexProgress(0)`.
    Failed,
    /// Preparation started, before the database export.
    Started,
    /// The database was exported to the temporary backup file.
    DatabaseExported,
    /// The iroh collection of database + blobs was created.
    CollectionCreated,
    /// The provider is listening; the ticket/QR code can now be shown.
    ProviderListening,
    /// The getter connected to the provider.
    ClientConnected,
    /// Blob transfer in progress; mapped onto the 450..=950 permille range.
    TransferInProgress { current_size: u64, total_size: u64 },
    /// The transfer finished successfully; maps to `ImexProgress(1000)`.
    Completed,
}
impl From<SendProgress> for EventType {
fn from(source: SendProgress) -> Self {
use SendProgress::*;
let num: u16 = match source {
Failed => 0,
Started => 100,
DatabaseExported => 300,
CollectionCreated => 350,
ProviderListening => 400,
ClientConnected => 450,
TransferInProgress {
current_size,
total_size,
} => {
// the range is 450..=950
450 + ((current_size as f64 / total_size as f64) * 500.).floor() as u16
}
Completed => 1000,
};
Self::ImexProgress(num.into())
}
}
/// Contacts a backup provider and receives the backup from it.
///
/// This uses a QR code to contact another instance of deltachat which is providing a backup
/// using the [`BackupProvider`]. Once connected it will authenticate using the secrets in
/// the QR code and retrieve the backup.
///
/// This is a long running operation which will only return when completed.
///
/// Using [`Qr`] as argument is a bit odd as it only accepts one specific variant of it. It
/// does avoid having [`iroh::provider::Ticket`] in the primary API however, without
/// having to revert to untyped bytes.
pub async fn get_backup(context: &Context, qr: Qr) -> Result<()> {
    ensure!(
        matches!(qr, Qr::Backup { .. }),
        "QR code for backup must be of type DCBACKUP"
    );
    ensure!(
        !context.is_configured().await?,
        "Cannot import backups to accounts in use."
    );
    // Pause IO for the duration of the import; the guard resumes it on drop.
    let _guard = context.scheduler.pause(context.clone()).await;
    // Acquire global "ongoing" mutex.
    let cancel_token = context.alloc_ongoing().await?;
    let res = tokio::select! {
        biased;
        res = get_backup_inner(context, qr) => res,
        _ = cancel_token.recv() => Err(format_err!("cancelled")),
    };
    // Fix: release the "ongoing" mutex on the cancellation path too.
    // Previously free_ongoing() was only called inside the get_backup_inner()
    // branch, so a cancelled transfer left the ongoing process allocated
    // forever, blocking every later ongoing process.
    context.free_ongoing().await;
    res
}
/// Dials the provider on each address from the ticket until one transfer succeeds.
///
/// A connection error moves on to the next address; any other error aborts the
/// import after cleaning up blobs that were already written.
async fn get_backup_inner(context: &Context, qr: Qr) -> Result<()> {
    let mut ticket = match qr {
        Qr::Backup { ticket } => ticket,
        _ => bail!("QR code for backup must be of type DCBACKUP"),
    };

    if ticket.addrs.is_empty() {
        bail!("ticket is missing addresses to dial");
    }
    // Crude sorting, most local wifi's are somewhere in the 192.168.0.0/16 range so
    // this will try them first.  (The prefix check below matches the whole /16.)
    ticket.addrs.sort_by(|a, b| {
        let a = a.to_string();
        let b = b.to_string();
        if a.starts_with("192.168.") && !b.starts_with("192.168.") {
            Ordering::Less
        } else if b.starts_with("192.168.") && !a.starts_with("192.168.") {
            Ordering::Greater
        } else {
            Ordering::Equal
        }
    });
    for addr in &ticket.addrs {
        let opts = Options {
            addr: *addr,
            peer_id: Some(ticket.peer),
            keylog: false,
        };
        info!("Attempting to contact {addr}.");
        match transfer_from_provider(context, &ticket, opts).await {
            Ok(_) => {
                delete_and_reset_all_device_msgs(context).await?;
                context.emit_event(ReceiveProgress::Completed.into());
                return Ok(());
            }
            Err(TransferError::ConnectionError(err)) => {
                // Could not reach the provider on this address, try the next one.
                warn!("Connection error: {err:#}.");
                continue;
            }
            Err(TransferError::Other(err)) => {
                // Clean up any blobs we already wrote.
                let readdir = fs::read_dir(context.get_blobdir()).await?;
                let mut readdir = ReadDirStream::new(readdir);
                while let Some(dirent) = readdir.next().await {
                    if let Ok(dirent) = dirent {
                        fs::remove_file(dirent.path()).await.ok();
                    }
                }
                context.emit_event(ReceiveProgress::Failed.into());
                return Err(err);
            }
        }
    }
    Err(anyhow!("failed to contact provider"))
}
/// Error during a single transfer attempt.
///
/// Mostly exists to distinguish between `ConnectionError` and any other errors.
#[derive(Debug, thiserror::Error)]
enum TransferError {
    /// The provider could not be reached; the caller may retry another address.
    #[error("connection error")]
    ConnectionError(#[source] anyhow::Error),
    /// Any failure after the connection was established; aborts the import.
    #[error("other")]
    Other(#[source] anyhow::Error),
}
/// Runs a single download attempt against one provider address.
///
/// Emits [`ReceiveProgress`] events while downloading.  Returns
/// [`TransferError::ConnectionError`] when the connection itself failed so the
/// caller can try another address, [`TransferError::Other`] afterwards.
async fn transfer_from_provider(
    context: &Context,
    ticket: &Ticket,
    opts: Options,
) -> Result<(), TransferError> {
    // Scale blob download progress onto 0..=max_blob_progress().
    let progress = ProgressEmitter::new(0, ReceiveProgress::max_blob_progress());
    spawn_progress_proxy(context.clone(), progress.subscribe());
    // Tracks whether the connection was established, to classify errors below.
    let mut connected = false;
    let on_connected = || {
        context.emit_event(ReceiveProgress::Connected.into());
        connected = true;
        async { Ok(()) }
    };
    // Background jobs spawned by on_blob (e.g. the database import).
    let jobs = Mutex::new(JoinSet::default());
    let on_blob =
        |hash, reader, name| on_blob(context, &progress, &jobs, ticket, hash, reader, name);
    let res = iroh::get::run(
        ticket.hash,
        ticket.token,
        opts,
        on_connected,
        |collection| {
            context.emit_event(ReceiveProgress::CollectionReceived.into());
            progress.set_total(collection.total_blobs_size());
            async { Ok(()) }
        },
        on_blob,
    )
    .await;
    // Wait for all spawned jobs to finish before reporting the result.
    let mut jobs = jobs.lock().await;
    while let Some(job) = jobs.join_next().await {
        job.context("job failed").map_err(TransferError::Other)?;
    }
    drop(progress);
    match res {
        Ok(stats) => {
            info!(
                "Backup transfer finished, transfer rate is {} Mbps.",
                stats.mbits()
            );
            Ok(())
        }
        Err(err) => match connected {
            true => Err(TransferError::Other(err)),
            false => Err(TransferError::ConnectionError(err)),
        },
    }
}
/// Callback invoked for each blob received from the provider.
///
/// This writes the blobs to the blobdir. If the blob is the database it will import it to
/// the database of the current [`Context`].
///
/// Returns the reader so iroh can continue verifying the stream.
async fn on_blob(
    context: &Context,
    progress: &ProgressEmitter,
    jobs: &Mutex<JoinSet<()>>,
    ticket: &Ticket,
    _hash: Hash,
    mut reader: DataStream,
    name: String,
) -> Result<DataStream> {
    ensure!(!name.is_empty(), "Received a nameless blob");
    let path = if name.starts_with("db/") {
        let context_dir = context
            .get_blobdir()
            .parent()
            .ok_or(anyhow!("Context dir not found"))?;
        let dbfile = context_dir.join(DBFILE_BACKUP_NAME);
        // Remove a leftover file from a previous, interrupted import.
        if fs::metadata(&dbfile).await.is_ok() {
            fs::remove_file(&dbfile).await?;
            warn!("Previous database export deleted.");
        }
        dbfile
    } else {
        ensure!(name.starts_with("blob/"), "malformatted blob name");
        // Use only the final '/'-separated segment as the file name.
        let blobname = name.rsplit('/').next().context("malformatted blob name")?;
        context.get_blobdir().join(blobname)
    };

    // Count the bytes read towards the progress events.
    let mut wrapped_reader = progress.wrap_async_read(&mut reader);
    let file = File::create(&path).await?;
    let mut file = BufWriter::with_capacity(128 * 1024, file);
    io::copy(&mut wrapped_reader, &mut file).await?;
    file.flush().await?;

    if name.starts_with("db/") {
        // Import the database in the background so the next blob can be
        // received concurrently; the caller joins these jobs before finishing.
        let context = context.clone();
        let token = ticket.token.to_string();
        jobs.lock().await.spawn(async move {
            if let Err(err) = context.sql.import(&path, token).await {
                error!("Cannot import database: {err:#?}.");
            }
            if let Err(err) = fs::remove_file(&path).await {
                error!(
                    "Failed to delete database import file '{}': {:#?}.",
                    path.display(),
                    err,
                );
            }
        });
    }
    Ok(reader)
}
/// Spawns a task proxying progress events.
///
/// This spawns a tokio task which receives events from the [`ProgressEmitter`] and sends
/// them to the context. The task finishes when the emitter is dropped.
///
/// This could be done directly in the emitter by making it less generic.
fn spawn_progress_proxy(context: Context, mut rx: broadcast::Receiver<u16>) {
    tokio::spawn(async move {
        loop {
            // Lagging only skips intermediate progress values, so it is ignored;
            // a closed channel means the emitter was dropped and we are done.
            let step = match rx.recv().await {
                Ok(step) => step,
                Err(RecvError::Lagged(_)) => continue,
                Err(RecvError::Closed) => break,
            };
            context.emit_event(ReceiveProgress::BlobProgress(step).into());
        }
    });
}
/// Create [`EventType::ImexProgress`] events using readable names.
///
/// Plus you get warnings if you don't use all variants.
#[derive(Debug)]
enum ReceiveProgress {
    /// Connection to the provider established.
    Connected,
    /// The collection listing all blobs was received.
    CollectionReceived,
    /// A value between 0 and 85 interpreted as a percentage.
    ///
    /// Other values are already used by the other variants of this enum.
    BlobProgress(u16),
    /// The whole backup was received; maps to `ImexProgress(1000)`.
    Completed,
    /// The transfer failed; maps to `ImexProgress(0)`.
    Failed,
}
impl ReceiveProgress {
    /// The maximum value for [`ReceiveProgress::BlobProgress`].
    ///
    /// This only exists to keep this magic value local in this type.
    fn max_blob_progress() -> u16 {
        // 100 + 10 * 85 = 950 in the From<ReceiveProgress> mapping, leaving
        // room below Completed (1000).
        85
    }
}
/// Maps each [`ReceiveProgress`] step onto the `ImexProgress` permille scale.
impl From<ReceiveProgress> for EventType {
    fn from(source: ReceiveProgress) -> Self {
        // BlobProgress is scaled into 100..=950 (step is capped at
        // max_blob_progress()); 0 means failure and 1000 means completion.
        let permille = match source {
            ReceiveProgress::Failed => 0,
            ReceiveProgress::Connected => 50,
            ReceiveProgress::CollectionReceived => 100,
            ReceiveProgress::BlobProgress(step) => 100 + 10 * step,
            ReceiveProgress::Completed => 1000,
        };
        EventType::ImexProgress(permille.into())
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::chat::{get_chat_msgs, send_msg, ChatItem};
    use crate::message::{Message, Viewtype};
    use crate::test_utils::TestContextManager;

    use super::*;

    /// End-to-end test: provide a backup on one device, fetch it on a second
    /// one and verify messages, attachments and progress events survive.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_send_receive() {
        let mut tcm = TestContextManager::new();

        // Create first device.
        let ctx0 = tcm.alice().await;

        // Write a message in the self chat
        let self_chat = ctx0.get_self_chat().await;
        let mut msg = Message::new(Viewtype::Text);
        msg.set_text(Some("hi there".to_string()));
        send_msg(&ctx0, self_chat.id, &mut msg).await.unwrap();

        // Send an attachment in the self chat
        let file = ctx0.get_blobdir().join("hello.txt");
        fs::write(&file, "i am attachment").await.unwrap();
        let mut msg = Message::new(Viewtype::File);
        msg.set_file(file.to_str().unwrap(), Some("text/plain"));
        send_msg(&ctx0, self_chat.id, &mut msg).await.unwrap();

        // Prepare to transfer backup.
        let provider = BackupProvider::prepare(&ctx0).await.unwrap();

        // Set up second device.
        let ctx1 = tcm.unconfigured().await;
        get_backup(&ctx1, provider.qr()).await.unwrap();

        // Make sure the provider finishes without an error.
        tokio::time::timeout(Duration::from_secs(30), provider)
            .await
            .expect("timed out")
            .expect("error in provider");

        // Check that we have the self message.
        let self_chat = ctx1.get_self_chat().await;
        let msgs = get_chat_msgs(&ctx1, self_chat.id).await.unwrap();
        assert_eq!(msgs.len(), 2);
        let msgid = match msgs.get(0).unwrap() {
            ChatItem::Message { msg_id } => msg_id,
            _ => panic!("wrong chat item"),
        };
        let msg = Message::load_from_db(&ctx1, *msgid).await.unwrap();
        let text = msg.get_text().unwrap();
        assert_eq!(text, "hi there");

        // Check that the attachment arrived with identical contents.
        let msgid = match msgs.get(1).unwrap() {
            ChatItem::Message { msg_id } => msg_id,
            _ => panic!("wrong chat item"),
        };
        let msg = Message::load_from_db(&ctx1, *msgid).await.unwrap();
        let path = msg.get_file(&ctx1).unwrap();
        let text = fs::read_to_string(&path).await.unwrap();
        assert_eq!(text, "i am attachment");

        // Check that both received the ImexProgress events.
        ctx0.evtracker
            .get_matching(|ev| matches!(ev, EventType::ImexProgress(1000)))
            .await;
        ctx1.evtracker
            .get_matching(|ev| matches!(ev, EventType::ImexProgress(1000)))
            .await;
    }

    /// Checks the 450..=950 scaling of [`SendProgress::TransferInProgress`].
    #[test]
    fn test_send_progress() {
        let cases = [
            ((0, 100), 450),
            ((10, 100), 500),
            ((50, 100), 700),
            ((100, 100), 950),
        ];

        for ((current_size, total_size), progress) in cases {
            let out = EventType::from(SendProgress::TransferInProgress {
                current_size,
                total_size,
            });
            assert_eq!(out, EventType::ImexProgress(progress));
        }
    }
}

View File

@@ -11,6 +11,7 @@ use std::sync::atomic::Ordering;
use anyhow::{Context as _, Result};
use deltachat_derive::{FromSql, ToSql};
use rand::{thread_rng, Rng};
use tracing::{error, info, warn};
use crate::context::Context;
use crate::imap::Imap;
@@ -110,7 +111,7 @@ impl Job {
///
/// The Job is consumed by this method.
pub(crate) async fn save(self, context: &Context) -> Result<()> {
info!(context, "saving job {:?}", self);
info!("saving job {:?}", self);
if self.job_id != 0 {
context
@@ -153,7 +154,7 @@ impl<'a> Connection<'a> {
}
pub(crate) async fn perform_job(context: &Context, mut connection: Connection<'_>, mut job: Job) {
info!(context, "job {} started...", &job);
info!("Job {} started...", &job);
let try_res = match perform_job_action(context, &mut job, &mut connection, 0).await {
Status::RetryNow => perform_job_action(context, &mut job, &mut connection, 1).await,
@@ -165,42 +166,33 @@ pub(crate) async fn perform_job(context: &Context, mut connection: Connection<'_
let tries = job.tries + 1;
if tries < JOB_RETRIES {
info!(context, "increase job {} tries to {}", job, tries);
info!("Increase job {job} tries to {tries}.");
job.tries = tries;
let time_offset = get_backoff_time_offset(tries);
job.desired_timestamp = time() + time_offset;
info!(
context,
"job #{} not succeeded on try #{}, retry in {} seconds.",
job.job_id,
tries,
time_offset
job.job_id, tries, time_offset
);
job.save(context).await.unwrap_or_else(|err| {
error!(context, "failed to save job: {:#}", err);
error!("Failed to save job: {err:#}.");
});
} else {
info!(
context,
"remove job {} as it exhausted {} retries", job, JOB_RETRIES
);
info!("Remove job {job} as it exhausted {JOB_RETRIES} retries.");
job.delete(context).await.unwrap_or_else(|err| {
error!(context, "failed to delete job: {:#}", err);
error!("Failed to delete job: {err:#}.");
});
}
}
Status::Finished(res) => {
if let Err(err) = res {
warn!(
context,
"remove job {} as it failed with error {:#}", job, err
);
warn!("Remove job {job} as it failed with error {err:#}.");
} else {
info!(context, "remove job {} as it succeeded", job);
info!("Remove job {job} as it succeeded.");
}
job.delete(context).await.unwrap_or_else(|err| {
error!(context, "failed to delete job: {:#}", err);
error!("failed to delete job: {:#}", err);
});
}
}
@@ -212,13 +204,13 @@ async fn perform_job_action(
connection: &mut Connection<'_>,
tries: u32,
) -> Status {
info!(context, "begin immediate try {} of job {}", tries, job);
info!("Begin immediate try {tries} of job {job}.");
let try_res = match job.action {
Action::DownloadMsg => job.download_msg(context, connection.inbox()).await,
};
info!(context, "Finished immediate try {} of job {}", tries, job);
info!("Finished immediate try {tries} of job {job}.");
try_res
}
@@ -238,6 +230,7 @@ fn get_backoff_time_offset(tries: u32) -> i64 {
pub(crate) async fn schedule_resync(context: &Context) -> Result<()> {
context.resync_request.store(true, Ordering::Relaxed);
context
.scheduler
.interrupt_inbox(InterruptInfo {
probe_network: false,
})
@@ -249,8 +242,11 @@ pub(crate) async fn schedule_resync(context: &Context) -> Result<()> {
pub async fn add(context: &Context, job: Job) -> Result<()> {
job.save(context).await.context("failed to save job")?;
info!(context, "interrupt: imap");
context.interrupt_inbox(InterruptInfo::new(false)).await;
info!("Interrupt: IMAP.");
context
.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
Ok(())
}
@@ -260,7 +256,7 @@ pub async fn add(context: &Context, job: Job) -> Result<()> {
/// jobs, this is tricky and probably wrong currently. Look at the
/// SQL queries for details.
pub(crate) async fn load_next(context: &Context, info: &InterruptInfo) -> Result<Option<Job>> {
info!(context, "loading job");
info!("Loading job.");
let query;
let params;
@@ -312,19 +308,19 @@ LIMIT 1;
Ok(job) => return Ok(job),
Err(err) => {
// Remove invalid job from the DB
info!(context, "cleaning up job, because of {:#}", err);
info!("Cleaning up job, because of {err:#}.");
// TODO: improve by only doing a single query
let id = context
.sql
.query_row(query, params.clone(), |row| row.get::<_, i32>(0))
.await
.context("Failed to retrieve invalid job ID from the database")?;
.context("failed to retrieve invalid job ID from the database")?;
context
.sql
.execute("DELETE FROM jobs WHERE id=?;", paramsv![id])
.await
.with_context(|| format!("Failed to delete invalid job {id}"))?;
.with_context(|| format!("failed to delete invalid job {id}"))?;
}
}
}

View File

@@ -14,6 +14,7 @@ pub use pgp::composed::{SignedPublicKey, SignedSecretKey};
use pgp::ser::Serialize;
use pgp::types::{KeyTrait, SecretKeyTrait};
use tokio::runtime::Handle;
use tracing::info;
use crate::config::Config;
use crate::constants::KeyGenType;
@@ -211,14 +212,13 @@ async fn generate_keypair(context: &Context) -> Result<KeyPair> {
let start = std::time::SystemTime::now();
let keytype = KeyGenType::from_i32(context.get_config_int(Config::KeyGenType).await?)
.unwrap_or_default();
info!(context, "Generating keypair with type {}", keytype);
info!("Generating keypair with type {}", keytype);
let keypair = Handle::current()
.spawn_blocking(move || crate::pgp::create_keypair(addr, keytype))
.await??;
store_self_keypair(context, &keypair, KeyPairUse::Default).await?;
info!(
context,
"Keypair generated in {:.3}s.",
start.elapsed().unwrap_or_default().as_secs()
);

View File

@@ -40,7 +40,6 @@ pub trait ToSql: rusqlite::ToSql + Send + Sync {}
impl<T: rusqlite::ToSql + Send + Sync> ToSql for T {}
#[macro_use]
pub mod log;
#[cfg(feature = "internals")]

View File

@@ -8,6 +8,7 @@ use async_channel::Receiver;
use bitflags::bitflags;
use quick_xml::events::{BytesEnd, BytesStart, BytesText};
use tokio::time::timeout;
use tracing::{info, warn};
use crate::chat::{self, ChatId};
use crate::contact::ContactId;
@@ -267,7 +268,7 @@ pub async fn send_locations_to_chat(
}
context.emit_event(EventType::ChatModified(chat_id));
if 0 != seconds {
context.interrupt_location().await;
context.scheduler.interrupt_location().await;
}
Ok(())
}
@@ -337,9 +338,9 @@ pub async fn set(context: &Context, latitude: f64, longitude: f64, accuracy: f64
ContactId::SELF,
]
).await {
warn!(context, "failed to store location {:#}", err);
warn!( "failed to store location {:#}", err);
} else {
info!(context, "stored location for chat {}", chat_id);
info!("stored location for chat {}", chat_id);
continue_streaming = true;
}
}
@@ -639,7 +640,7 @@ pub(crate) async fn location_loop(context: &Context, interrupt_receiver: Receive
loop {
let next_event = match maybe_send_locations(context).await {
Err(err) => {
warn!(context, "maybe_send_locations failed: {:#}", err);
warn!("maybe_send_locations failed: {:#}", err);
Some(60) // Retry one minute later.
}
Ok(next_event) => next_event,
@@ -652,7 +653,6 @@ pub(crate) async fn location_loop(context: &Context, interrupt_receiver: Receive
};
info!(
context,
"Location loop is waiting for {} or interrupt",
duration_to_str(duration)
);
@@ -719,10 +719,7 @@ async fn maybe_send_locations(context: &Context) -> Result<Option<u64>> {
// Send location-only message.
// Pending locations are attached automatically to every message,
// so also to this empty text message.
info!(
context,
"Chat {} has pending locations, sending them.", chat_id
);
info!("Chat {} has pending locations, sending them.", chat_id);
let mut msg = Message::new(Viewtype::Text);
msg.hidden = true;
msg.param.set_cmd(SystemMessage::LocationOnly);
@@ -730,8 +727,8 @@ async fn maybe_send_locations(context: &Context) -> Result<Option<u64>> {
} else {
// Wait until pending locations can be sent.
info!(
context,
"Chat {} has pending locations, but they can't be sent yet.", chat_id
"Chat {} has pending locations, but they can't be sent yet.",
chat_id
);
next_event = next_event
.into_iter()
@@ -740,17 +737,14 @@ async fn maybe_send_locations(context: &Context) -> Result<Option<u64>> {
}
} else {
info!(
context,
"Chat {} has location streaming enabled, but no pending locations.", chat_id
"Chat {} has location streaming enabled, but no pending locations.",
chat_id
);
}
} else {
// Location streaming was either explicitly disabled (locations_send_begin = 0) or
// locations_send_until is in the past.
info!(
context,
"Disabling location streaming for chat {}.", chat_id
);
info!("Disabling location streaming for chat {}.", chat_id);
context
.sql
.execute(

View File

@@ -4,36 +4,6 @@
use crate::context::Context;
#[macro_export]
macro_rules! info {
($ctx:expr, $msg:expr) => {
info!($ctx, $msg,)
};
($ctx:expr, $msg:expr, $($args:expr),* $(,)?) => {{
let formatted = format!($msg, $($args),*);
let full = format!("{file}:{line}: {msg}",
file = file!(),
line = line!(),
msg = &formatted);
$ctx.emit_event($crate::EventType::Info(full));
}};
}
#[macro_export]
macro_rules! warn {
($ctx:expr, $msg:expr) => {
warn!($ctx, $msg,)
};
($ctx:expr, $msg:expr, $($args:expr),* $(,)?) => {{
let formatted = format!($msg, $($args),*);
let full = format!("{file}:{line}: {msg}",
file = file!(),
line = line!(),
msg = &formatted);
$ctx.emit_event($crate::EventType::Warning(full));
}};
}
#[macro_export]
macro_rules! error {
($ctx:expr, $msg:expr) => {
@@ -41,7 +11,6 @@ macro_rules! error {
};
($ctx:expr, $msg:expr, $($args:expr),* $(,)?) => {{
let formatted = format!($msg, $($args),*);
$ctx.set_last_error(&formatted);
$ctx.emit_event($crate::EventType::Error(formatted));
}};
}
@@ -50,13 +19,13 @@ impl Context {
/// Set last error string.
/// Implemented as blocking as used from macros in different, not always async blocks.
pub fn set_last_error(&self, error: &str) {
let mut last_error = self.last_error.write().unwrap();
let mut last_error = self.events.last_error.write().unwrap();
*last_error = error.to_string();
}
/// Get last error string.
pub fn get_last_error(&self) -> String {
let last_error = &*self.last_error.read().unwrap();
let last_error = &*self.events.last_error.read().unwrap();
last_error.clone()
}
}
@@ -168,12 +137,6 @@ mod tests {
error!(t, "foo-error");
assert_eq!(t.get_last_error(), "foo-error");
warn!(t, "foo-warning");
assert_eq!(t.get_last_error(), "foo-error");
info!(t, "foo-info");
assert_eq!(t.get_last_error(), "foo-error");
error!(t, "bar-error");
error!(t, "baz-error");
assert_eq!(t.get_last_error(), "baz-error");

View File

@@ -7,6 +7,7 @@ use anyhow::{ensure, format_err, Context as _, Result};
use deltachat_derive::{FromSql, ToSql};
use rusqlite::types::ValueRef;
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};
use crate::chat::{self, Chat, ChatId};
use crate::config::Config;
@@ -337,7 +338,6 @@ impl Message {
Ok(t) => t,
Err(_) => {
warn!(
context,
concat!(
"dc_msg_load_from_db: could not get ",
"text column as non-lossy utf8 id {}"
@@ -1419,7 +1419,10 @@ pub async fn delete_msgs(context: &Context, msg_ids: &[MsgId]) -> Result<()> {
}
// Interrupt Inbox loop to start message deletion and run housekeeping.
context.interrupt_inbox(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
Ok(())
}
@@ -1508,7 +1511,7 @@ pub async fn markseen_msgs(context: &Context, msg_ids: Vec<MsgId>) -> Result<()>
&& (curr_state == MessageState::InFresh || curr_state == MessageState::InNoticed)
{
update_msg_state(context, id, MessageState::InSeen).await?;
info!(context, "Seen message {}.", id);
info!("Seen message {}.", id);
markseen_on_imap_table(context, &curr_rfc724_mid).await?;
@@ -1531,7 +1534,10 @@ pub async fn markseen_msgs(context: &Context, msg_ids: Vec<MsgId>) -> Result<()>
)
.await
.context("failed to insert into smtp_mdns")?;
context.interrupt_smtp(InterruptInfo::new(false)).await;
context
.scheduler
.interrupt_smtp(InterruptInfo::new(false))
.await;
}
}
updated_chat_ids.insert(curr_chat_id);
@@ -1589,11 +1595,11 @@ pub(crate) async fn set_msg_failed(context: &Context, msg_id: MsgId, error: &str
if let Ok(mut msg) = Message::load_from_db(context, msg_id).await {
if msg.state.can_fail() {
msg.state = MessageState::OutFailed;
warn!(context, "{} failed: {}", msg_id, error);
warn!("{} failed: {}", msg_id, error);
} else {
warn!(
context,
"{} seems to have failed ({}), but state is {}", msg_id, error, msg.state
"{} seems to have failed ({}), but state is {}",
msg_id, error, msg.state
)
}
@@ -1610,7 +1616,7 @@ pub(crate) async fn set_msg_failed(context: &Context, msg_id: MsgId, error: &str
msg_id,
}),
Err(e) => {
warn!(context, "{:?}", e);
warn!("{:?}", e);
}
}
}
@@ -1624,10 +1630,7 @@ pub async fn handle_mdn(
timestamp_sent: i64,
) -> Result<Option<(ChatId, MsgId)>> {
if from_id == ContactId::SELF {
warn!(
context,
"ignoring MDN sent to self, this is a bug on the sender device"
);
warn!("ignoring MDN sent to self, this is a bug on the sender device");
// This is not an error on our side,
// we successfully ignored an invalid MDN and return `Ok`.
@@ -1661,7 +1664,6 @@ pub async fn handle_mdn(
res
} else {
info!(
context,
"handle_mdn found no message with Message-ID {:?} sent by us in the database",
rfc724_mid
);
@@ -1779,7 +1781,7 @@ async fn ndn_maybe_add_info_msg(
Chattype::Mailinglist => {
// ndn_maybe_add_info_msg() is about the case when delivery to the group failed.
// If we get an NDN for the mailing list, just issue a warning.
warn!(context, "ignoring NDN for mailing list.");
warn!("ignoring NDN for mailing list.");
}
Chattype::Single | Chattype::Undefined => {}
}
@@ -1800,7 +1802,7 @@ pub async fn get_unblocked_msg_cnt(context: &Context) -> usize {
{
Ok(res) => res,
Err(err) => {
error!(context, "get_unblocked_msg_cnt() failed. {:#}", err);
error!("get_unblocked_msg_cnt() failed. {:#}", err);
0
}
}
@@ -1820,7 +1822,7 @@ pub async fn get_request_msg_cnt(context: &Context) -> usize {
{
Ok(res) => res,
Err(err) => {
error!(context, "get_request_msg_cnt() failed. {:#}", err);
error!("get_request_msg_cnt() failed. {:#}", err);
0
}
}
@@ -1891,7 +1893,7 @@ pub(crate) async fn rfc724_mid_exists(
) -> Result<Option<MsgId>> {
let rfc724_mid = rfc724_mid.trim_start_matches('<').trim_end_matches('>');
if rfc724_mid.is_empty() {
warn!(context, "Empty rfc724_mid passed to rfc724_mid_exists");
warn!("Empty rfc724_mid passed to rfc724_mid_exists");
return Ok(None);
}

View File

@@ -8,6 +8,7 @@ use chrono::TimeZone;
use format_flowed::{format_flowed, format_flowed_quote};
use lettre_email::{mime, Address, Header, MimeMultipartType, PartBuilder};
use tokio::fs;
use tracing::{info, warn};
use crate::blob::BlobObject;
use crate::chat::Chat;
@@ -651,8 +652,7 @@ impl<'a> MimeFactory<'a> {
};
let peerstates = self.peerstates_for_recipients(context).await?;
let should_encrypt =
encrypt_helper.should_encrypt(context, e2ee_guaranteed, &peerstates)?;
let should_encrypt = encrypt_helper.should_encrypt(e2ee_guaranteed, &peerstates)?;
let is_encrypted = should_encrypt && !force_plaintext;
let message = if parts.is_empty() {
@@ -730,7 +730,6 @@ impl<'a> MimeFactory<'a> {
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(
context,
"mimefactory: unencrypted message mime-body:\n{}",
message.clone().build().as_string(),
);
@@ -819,7 +818,6 @@ impl<'a> MimeFactory<'a> {
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(
context,
"mimefactory: outgoing message mime-body:\n{}",
outer_message.clone().build().as_string(),
);
@@ -940,7 +938,6 @@ impl<'a> MimeFactory<'a> {
& DC_FROM_HANDSHAKE
{
info!(
context,
"sending secure-join message \'{}\' >>>>>>>>>>>>>>>>>>>>>>>>>",
"vg-member-added",
);
@@ -1025,8 +1022,8 @@ impl<'a> MimeFactory<'a> {
let step = msg.param.get(Param::Arg).unwrap_or_default();
if !step.is_empty() {
info!(
context,
"sending secure-join message \'{}\' >>>>>>>>>>>>>>>>>>>>>>>>>", step,
"sending secure-join message \'{}\' >>>>>>>>>>>>>>>>>>>>>>>>>",
step,
);
headers
.protected
@@ -1074,7 +1071,7 @@ impl<'a> MimeFactory<'a> {
}
if let Some(grpimage) = grpimage {
info!(context, "setting group image '{}'", grpimage);
info!("setting group image '{}'", grpimage);
let mut meta = Message {
viewtype: Viewtype::Image,
..Default::default()
@@ -1234,7 +1231,7 @@ impl<'a> MimeFactory<'a> {
match self.get_location_kml_part(context).await {
Ok(part) => parts.push(part),
Err(err) => {
warn!(context, "mimefactory: could not send location: {}", err);
warn!("mimefactory: could not send location: {}", err);
}
}
}
@@ -1265,7 +1262,7 @@ impl<'a> MimeFactory<'a> {
"Chat-User-Avatar".into(),
format!("base64:{avatar}"),
)),
Err(err) => warn!(context, "mimefactory: cannot attach selfavatar: {}", err),
Err(err) => warn!("mimefactory: cannot attach selfavatar: {}", err),
},
None => headers
.protected

View File

@@ -12,6 +12,7 @@ use format_flowed::unformat_flowed;
use lettre_email::mime::{self, Mime};
use mailparse::{addrparse_header, DispositionType, MailHeader, MailHeaderMap, SingleInfo};
use once_cell::sync::Lazy;
use tracing::{error, info, warn};
use crate::aheader::{Aheader, EncryptPreference};
use crate::blob::BlobObject;
@@ -213,7 +214,6 @@ impl MimeMessage {
// Parse IMF headers.
MimeMessage::merge_headers(
context,
&mut headers,
&mut recipients,
&mut from,
@@ -231,7 +231,6 @@ impl MimeMessage {
// messages are shown as unencrypted anyway.
MimeMessage::merge_headers(
context,
&mut headers,
&mut recipients,
&mut from,
@@ -281,14 +280,13 @@ impl MimeMessage {
let public_keyring = keyring_from_peerstate(decryption_info.peerstate.as_ref());
let (mail, mut signatures, encrypted) = match tokio::task::block_in_place(|| {
try_decrypt(context, &mail, &private_keyring, &public_keyring)
try_decrypt(&mail, &private_keyring, &public_keyring)
}) {
Ok(Some((raw, signatures))) => {
mail_raw = raw;
let decrypted_mail = mailparse::parse_mail(&mail_raw)?;
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(
context,
"decrypted message mime-body:\n{}",
String::from_utf8_lossy(&mail_raw),
);
@@ -297,7 +295,7 @@ impl MimeMessage {
}
Ok(None) => (Ok(mail), HashSet::new(), false),
Err(err) => {
warn!(context, "decryption failed: {:#}", err);
warn!("decryption failed: {:#}", err);
(Err(err), HashSet::new(), false)
}
};
@@ -336,7 +334,6 @@ impl MimeMessage {
headers.remove("subject");
MimeMessage::merge_headers(
context,
&mut headers,
&mut recipients,
&mut signed_from,
@@ -355,10 +352,7 @@ impl MimeMessage {
// Or it's because someone is doing some replay attack
// - OTOH, I can't come up with an attack scenario
// where this would be useful.
warn!(
context,
"From header in signed part doesn't match the outer one",
);
warn!("From header in signed part doesn't match the outer one",);
}
}
}
@@ -468,7 +462,7 @@ impl MimeMessage {
}
/// Parses system messages.
fn parse_system_message_headers(&mut self, context: &Context) {
fn parse_system_message_headers(&mut self) {
if self.get_header(HeaderDef::AutocryptSetupMessage).is_some() {
self.parts.retain(|part| {
part.mimetype.is_none()
@@ -478,7 +472,7 @@ impl MimeMessage {
if self.parts.len() == 1 {
self.is_system_message = SystemMessage::AutocryptSetupMessage;
} else {
warn!(context, "could not determine ASM mime-part");
warn!("could not determine ASM mime-part");
}
} else if let Some(value) = self.get_header(HeaderDef::ChatContent) {
if value == "location-streaming-enabled" {
@@ -602,7 +596,7 @@ impl MimeMessage {
}
async fn parse_headers(&mut self, context: &Context) -> Result<()> {
self.parse_system_message_headers(context);
self.parse_system_message_headers();
self.parse_avatar_headers(context).await;
self.parse_videochat_headers();
if self.delivery_report.is_none() {
@@ -659,8 +653,8 @@ impl MimeMessage {
}
} else {
warn!(
context,
"{} requested a read receipt to {}, ignoring", from, dn_to.addr
"{} requested a read receipt to {}, ignoring",
from, dn_to.addr
);
}
}
@@ -724,10 +718,7 @@ impl MimeMessage {
{
Ok(blob) => Some(AvatarAction::Change(blob.as_name().to_string())),
Err(err) => {
warn!(
context,
"Could not save decoded avatar to blob file: {:#}", err
);
warn!("Could not save decoded avatar to blob file: {:#}", err);
None
}
}
@@ -795,13 +786,12 @@ impl MimeMessage {
if mail.ctype.params.get("protected-headers").is_some() {
if mail.ctype.mimetype == "text/rfc822-headers" {
warn!(
context,
"Protected headers found in text/rfc822-headers attachment: Will be ignored.",
);
return Ok(false);
}
warn!(context, "Ignoring nested protected headers");
warn!( "Ignoring nested protected headers");
}
enum MimeS {
@@ -920,7 +910,7 @@ impl MimeMessage {
if mail.subparts.len() >= 2 {
match mail.ctype.params.get("report-type").map(|s| s as &str) {
Some("disposition-notification") => {
if let Some(report) = self.process_report(context, mail)? {
if let Some(report) = self.process_report(mail)? {
self.mdn_reports.push(report);
}
@@ -938,7 +928,7 @@ impl MimeMessage {
}
// Some providers, e.g. Tiscali, forget to set the report-type. So, if it's None, assume that it might be delivery-status
Some("delivery-status") | None => {
if let Some(report) = self.process_delivery_status(context, mail)? {
if let Some(report) = self.process_delivery_status(mail)? {
self.delivery_report = Some(report);
}
@@ -1002,7 +992,7 @@ impl MimeMessage {
let (mime_type, msg_type) = get_mime_type(mail)?;
let raw_mime = mail.ctype.mimetype.to_lowercase();
let filename = get_attachment_filename(context, mail)?;
let filename = get_attachment_filename(mail)?;
let old_part_count = self.parts.len();
@@ -1022,7 +1012,7 @@ impl MimeMessage {
None => {
match mime_type.type_() {
mime::IMAGE | mime::AUDIO | mime::VIDEO | mime::APPLICATION => {
warn!(context, "Missing attachment");
warn!("Missing attachment");
return Ok(false);
}
mime::TEXT
@@ -1033,7 +1023,7 @@ impl MimeMessage {
let decoded_data = match mail.get_body() {
Ok(decoded_data) => decoded_data,
Err(err) => {
warn!(context, "Invalid body parsed {:#}", err);
warn!("Invalid body parsed {:#}", err);
// Note that it's not always an error - might be no data
return Ok(false);
}
@@ -1053,7 +1043,7 @@ impl MimeMessage {
let decoded_data = match mail.get_body() {
Ok(decoded_data) => decoded_data,
Err(err) => {
warn!(context, "Invalid body parsed {:#}", err);
warn!("Invalid body parsed {:#}", err);
// Note that it's not always an error - might be no data
return Ok(false);
}
@@ -1185,7 +1175,7 @@ impl MimeMessage {
if filename.starts_with("location") || filename.starts_with("message") {
let parsed = location::Kml::parse(decoded_data)
.map_err(|err| {
warn!(context, "failed to parse kml part: {:#}", err);
warn!("failed to parse kml part: {:#}", err);
})
.ok();
if filename.starts_with("location") {
@@ -1203,7 +1193,7 @@ impl MimeMessage {
self.sync_items = context
.parse_sync_items(serialized)
.map_err(|err| {
warn!(context, "failed to parse sync data: {:#}", err);
warn!("failed to parse sync data: {:#}", err);
})
.ok();
return Ok(());
@@ -1224,13 +1214,13 @@ impl MimeMessage {
Ok(blob) => blob,
Err(err) => {
error!(
context,
"Could not add blob for mime part {}, error {:#}", filename, err
"Could not add blob for mime part {}, error {:#}",
filename, err
);
return Ok(());
}
};
info!(context, "added blobfile: {:?}", blob.as_name());
info!("added blobfile: {:?}", blob.as_name());
/* create and register Mime part referencing the new Blob object */
let mut part = Part::default();
@@ -1261,23 +1251,20 @@ impl MimeMessage {
) -> Result<bool> {
let key = match str::from_utf8(decoded_data) {
Err(err) => {
warn!(context, "PGP key attachment is not a UTF-8 file: {}", err);
warn!("PGP key attachment is not a UTF-8 file: {}", err);
return Ok(false);
}
Ok(key) => key,
};
let key = match SignedPublicKey::from_asc(key) {
Err(err) => {
warn!(
context,
"PGP key attachment is not an ASCII-armored file: {:#}", err
);
warn!("PGP key attachment is not an ASCII-armored file: {:#}", err);
return Ok(false);
}
Ok((key, _)) => key,
};
if let Err(err) = key.verify() {
warn!(context, "attached PGP key verification failed: {}", err);
warn!("attached PGP key verification failed: {}", err);
return Ok(false);
}
if !key.details.users.iter().any(|user| {
@@ -1293,19 +1280,17 @@ impl MimeMessage {
// user have an Autocrypt-capable MUA and also attaches a key, but if that's the
// case, let 'em first disable Autocrypt and then change the key by attaching it.
warn!(
context,
"not using attached PGP key for peer '{}' because another one is already set \
with prefer-encrypt={}",
peerstate.addr,
peerstate.prefer_encrypt,
peerstate.addr, peerstate.prefer_encrypt,
);
return Ok(false);
}
}
peerstate.public_key = Some(key);
info!(
context,
"using attached PGP key for peer '{}' with prefer-encrypt=mutual", peerstate.addr,
"using attached PGP key for peer '{}' with prefer-encrypt=mutual",
peerstate.addr,
);
peerstate.prefer_encrypt = EncryptPreference::Mutual;
peerstate.save_to_db(&context.sql).await?;
@@ -1357,7 +1342,6 @@ impl MimeMessage {
}
fn merge_headers(
context: &Context,
headers: &mut HashMap<String, String>,
recipients: &mut Vec<SingleInfo>,
from: &mut Option<SingleInfo>,
@@ -1376,7 +1360,7 @@ impl MimeMessage {
Ok(addrlist) => {
*chat_disposition_notification_to = addrlist.extract_single_info();
}
Err(e) => warn!(context, "Could not read {} address: {}", key, e),
Err(e) => warn!("Could not read {} address: {}", key, e),
}
} else {
let value = field.get_value();
@@ -1398,11 +1382,7 @@ impl MimeMessage {
}
}
fn process_report(
&self,
context: &Context,
report: &mailparse::ParsedMail<'_>,
) -> Result<Option<Report>> {
fn process_report(&self, report: &mailparse::ParsedMail<'_>) -> Result<Option<Report>> {
// parse as mailheaders
let report_body = if let Some(subpart) = report.subparts.get(1) {
subpart.get_body_raw()?
@@ -1433,7 +1413,6 @@ impl MimeMessage {
}));
}
warn!(
context,
"ignoring unknown disposition-notification, Message-Id: {:?}",
report_fields.get_header_value(HeaderDef::MessageId)
);
@@ -1443,7 +1422,6 @@ impl MimeMessage {
fn process_delivery_status(
&self,
context: &Context,
report: &mailparse::ParsedMail<'_>,
) -> Result<Option<DeliveryReport>> {
// Assume failure.
@@ -1455,7 +1433,7 @@ impl MimeMessage {
if status_part.ctype.mimetype != "message/delivery-status"
&& status_part.ctype.mimetype != "message/global-delivery-status"
{
warn!(context, "Second part of Delivery Status Notification is not message/delivery-status or message/global-delivery-status, ignoring");
warn!( "Second part of Delivery Status Notification is not message/delivery-status or message/global-delivery-status, ignoring");
return Ok(None);
}
@@ -1469,14 +1447,14 @@ impl MimeMessage {
let (status_fields, _) = mailparse::parse_headers(status_body)?;
if let Some(action) = status_fields.get_first_value("action") {
if action != "failed" {
info!(context, "DSN with {:?} action", action);
info!("DSN with {:?} action", action);
failure = false;
}
} else {
warn!(context, "DSN without action");
warn!("DSN without action");
}
} else {
warn!(context, "DSN without per-recipient fields");
warn!("DSN without per-recipient fields");
}
} else {
// No message/delivery-status part.
@@ -1513,7 +1491,6 @@ impl MimeMessage {
}
warn!(
context,
"ignoring unknown ndn-notification, Message-Id: {:?}",
report_fields.get_header_value(HeaderDef::MessageId)
);
@@ -1638,7 +1615,7 @@ impl MimeMessage {
}
Ok(None) => {}
Err(err) => {
warn!(context, "failed to handle_mdn: {:#}", err);
warn!("failed to handle_mdn: {:#}", err);
}
}
}
@@ -1651,7 +1628,7 @@ impl MimeMessage {
.find(|p| p.typ == Viewtype::Text)
.map(|p| p.msg.clone());
if let Err(e) = message::handle_ndn(context, delivery_report, error).await {
warn!(context, "Could not handle ndn: {}", e);
warn!("Could not handle ndn: {}", e);
}
}
}
@@ -1699,7 +1676,7 @@ async fn update_gossip_peerstates(
let header = match value.parse::<Aheader>() {
Ok(header) => header,
Err(err) => {
warn!(context, "Failed parsing Autocrypt-Gossip header: {}", err);
warn!("Failed parsing Autocrypt-Gossip header: {}", err);
continue;
}
};
@@ -1709,16 +1686,16 @@ async fn update_gossip_peerstates(
.any(|info| addr_cmp(&info.addr, &header.addr))
{
warn!(
context,
"Ignoring gossiped \"{}\" as the address is not in To/Cc list.", &header.addr,
"Ignoring gossiped \"{}\" as the address is not in To/Cc list.",
&header.addr,
);
continue;
}
if addr_cmp(from, &header.addr) {
// Non-standard, but anyway we can't update the cached peerstate here.
warn!(
context,
"Ignoring gossiped \"{}\" as it equals the From address", &header.addr,
"Ignoring gossiped \"{}\" as it equals the From address",
&header.addr,
);
continue;
}
@@ -1909,10 +1886,7 @@ fn is_attachment_disposition(mail: &mailparse::ParsedMail<'_>) -> bool {
/// returned. If Content-Disposition is "attachment" but filename is
/// not specified, filename is guessed. If Content-Disposition cannot
/// be parsed, returns an error.
fn get_attachment_filename(
context: &Context,
mail: &mailparse::ParsedMail,
) -> Result<Option<String>> {
fn get_attachment_filename(mail: &mailparse::ParsedMail) -> Result<Option<String>> {
let ct = mail.get_content_disposition();
// try to get file name as "encoded-words" from
@@ -1924,7 +1898,7 @@ fn get_attachment_filename(
// be graceful and just use the original name.
// some MUA, including Delta Chat up to core1.50,
// use `filename*` mistakenly for simple encoded-words without following rfc2231
warn!(context, "apostrophed encoding invalid: {}", name);
warn!("apostrophed encoding invalid: {}", name);
desired_filename = Some(name);
}
}
@@ -1976,10 +1950,10 @@ pub(crate) fn get_list_post(headers: &[MailHeader]) -> Option<String> {
.map(|s| s.addr)
}
fn get_all_addresses_from_header<F>(headers: &[MailHeader], pred: F) -> Vec<SingleInfo>
where
F: Fn(String) -> bool,
{
fn get_all_addresses_from_header(
headers: &[MailHeader],
pred: fn(String) -> bool,
) -> Vec<SingleInfo> {
let mut result: Vec<SingleInfo> = Default::default();
headers
@@ -2154,147 +2128,123 @@ mod tests {
assert!(is_attachment_disposition(&mail.subparts[1]));
}
fn load_mail_with_attachment<'a>(t: &'a TestContext, raw: &'a [u8]) -> ParsedMail<'a> {
fn load_mail_with_attachment(raw: &[u8]) -> ParsedMail<'_> {
let mail = mailparse::parse_mail(raw).unwrap();
assert!(get_attachment_filename(t, &mail).unwrap().is_none());
assert!(get_attachment_filename(t, &mail.subparts[0])
assert!(get_attachment_filename(&mail).unwrap().is_none());
assert!(get_attachment_filename(&mail.subparts[0])
.unwrap()
.is_none());
mail
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_simple.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_simple.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("test.html".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_encoded_words() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_encoded_words.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_encoded_words() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_encoded_words.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Maßnahmen Okt. 2020.html".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_encoded_words_binary() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_encoded_words_binary.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_encoded_words_binary() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_encoded_words_binary.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some(" § 165 Abs".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_encoded_words_windows1251() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_encoded_words_windows1251.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_encoded_words_windows1251() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_encoded_words_windows1251.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("file Что нового 2020.pdf".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_encoded_words_cont() {
#[test]
fn test_get_attachment_filename_encoded_words_cont() {
// test continued encoded-words and also test apostropes work that way
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_encoded_words_cont.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_encoded_words_cont.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Maßn'ah'men Okt. 2020.html".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_encoded_words_bad_delimiter() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_encoded_words_bad_delimiter.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_encoded_words_bad_delimiter() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_encoded_words_bad_delimiter.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
// not decoded as a space is missing after encoded-words part
assert_eq!(filename, Some("=?utf-8?q?foo?=.bar".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_apostrophed() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_apostrophed.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_apostrophed() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_apostrophed.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Maßnahmen Okt. 2021.html".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_apostrophed_cont() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_apostrophed_cont.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_apostrophed_cont() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_apostrophed_cont.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Maßnahmen März 2022.html".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_apostrophed_windows1251() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_apostrophed_windows1251.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_apostrophed_windows1251() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_apostrophed_windows1251.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("программирование.HTM".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_apostrophed_cp1252() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_apostrophed_cp1252.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_apostrophed_cp1252() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_apostrophed_cp1252.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Auftragsbestätigung.pdf".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_apostrophed_invalid() {
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_apostrophed_invalid.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
#[test]
fn test_get_attachment_filename_apostrophed_invalid() {
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_apostrophed_invalid.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("somedäüta.html.zip".to_string()))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_attachment_filename_combined() {
#[test]
fn test_get_attachment_filename_combined() {
// test that if `filename` and `filename*0` are given, the filename is not doubled
let t = TestContext::new().await;
let mail = load_mail_with_attachment(
&t,
include_bytes!("../test-data/message/attach_filename_combined.eml"),
);
let filename = get_attachment_filename(&t, &mail.subparts[1]).unwrap();
let mail = load_mail_with_attachment(include_bytes!(
"../test-data/message/attach_filename_combined.eml"
));
let filename = get_attachment_filename(&mail.subparts[1]).unwrap();
assert_eq!(filename, Some("Maßnahmen Okt. 2020.html".to_string()))
}

View File

@@ -8,6 +8,7 @@ use anyhow::{Context as _, Error, Result};
use tokio::net::{lookup_host, TcpStream};
use tokio::time::timeout;
use tokio_io_timeout::TimeoutStream;
use tracing::{info, warn};
use crate::context::Context;
use crate::tools::time;
@@ -50,8 +51,8 @@ async fn lookup_host_with_cache(
Ok(res) => res,
Err(err) => {
warn!(
context,
"DNS resolution for {}:{} failed: {:#}.", hostname, port, err
"DNS resolution for {}:{} failed: {:#}.",
hostname, port, err
);
Vec::new()
}
@@ -64,7 +65,7 @@ async fn lookup_host_with_cache(
continue;
}
info!(context, "Resolved {}:{} into {}.", hostname, port, &addr);
info!("Resolved {}:{} into {}.", hostname, port, &addr);
// Update the cache.
context
@@ -110,8 +111,8 @@ async fn lookup_host_with_cache(
}
Err(err) => {
warn!(
context,
"Failed to parse cached address {:?}: {:#}.", cached_address, err
"Failed to parse cached address {:?}: {:#}.",
cached_address, err
);
}
}
@@ -163,10 +164,7 @@ pub(crate) async fn connect_tcp(
break;
}
Err(err) => {
warn!(
context,
"Failed to connect to {}: {:#}.", resolved_addr, err
);
warn!("Failed to connect to {}: {:#}.", resolved_addr, err);
last_error = Some(err);
}
}

View File

@@ -2,7 +2,7 @@ use async_native_tls::TlsStream;
use fast_socks5::client::Socks5Stream;
use std::pin::Pin;
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite, BufWriter};
use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite, BufStream, BufWriter};
use tokio_io_timeout::TimeoutStream;
pub(crate) trait SessionStream:
@@ -22,6 +22,11 @@ impl<T: SessionStream> SessionStream for TlsStream<T> {
self.get_mut().set_read_timeout(timeout);
}
}
impl<T: SessionStream> SessionStream for BufStream<T> {
fn set_read_timeout(&mut self, timeout: Option<Duration>) {
self.get_mut().set_read_timeout(timeout);
}
}
impl<T: SessionStream> SessionStream for BufWriter<T> {
fn set_read_timeout(&mut self, timeout: Option<Duration>) {
self.get_mut().set_read_timeout(timeout);
@@ -39,3 +44,8 @@ impl<T: SessionStream> SessionStream for Socks5Stream<T> {
self.get_socket_mut().set_read_timeout(timeout)
}
}
/// Session stream with a read buffer.
pub(crate) trait SessionBufStream: SessionStream + AsyncBufRead {}
impl<T: SessionStream + AsyncBufRead> SessionBufStream for T {}

View File

@@ -5,6 +5,7 @@ use std::collections::HashMap;
use anyhow::Result;
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
use serde::Deserialize;
use tracing::{info, warn};
use crate::config::Config;
use crate::context::Context;
@@ -62,7 +63,7 @@ pub async fn get_oauth2_url(
redirect_uri: &str,
) -> Result<Option<String>> {
let socks5_enabled = context.get_config_bool(Config::Socks5Enabled).await?;
if let Some(oauth2) = Oauth2::from_address(context, addr, socks5_enabled).await {
if let Some(oauth2) = Oauth2::from_address(addr, socks5_enabled).await {
context
.sql
.set_raw_config("oauth2_pending_redirect_uri", Some(redirect_uri))
@@ -83,7 +84,7 @@ pub(crate) async fn get_oauth2_access_token(
regenerate: bool,
) -> Result<Option<String>> {
let socks5_enabled = context.get_config_bool(Config::Socks5Enabled).await?;
if let Some(oauth2) = Oauth2::from_address(context, addr, socks5_enabled).await {
if let Some(oauth2) = Oauth2::from_address(addr, socks5_enabled).await {
let lock = context.oauth2_mutex.lock().await;
// read generated token
@@ -105,7 +106,7 @@ pub(crate) async fn get_oauth2_access_token(
let (redirect_uri, token_url, update_redirect_uri_on_success) =
if refresh_token.is_none() || refresh_token_for != code {
info!(context, "Generate OAuth2 refresh_token and access_token...",);
info!("Generate OAuth2 refresh_token and access_token...",);
(
context
.sql
@@ -116,10 +117,7 @@ pub(crate) async fn get_oauth2_access_token(
true,
)
} else {
info!(
context,
"Regenerate OAuth2 access_token by refresh_token...",
);
info!("Regenerate OAuth2 access_token by refresh_token...",);
(
context
.sql
@@ -167,14 +165,14 @@ pub(crate) async fn get_oauth2_access_token(
Ok(response) => response,
Err(err) => {
warn!(
context,
"Failed to parse OAuth2 JSON response from {}: error: {}", token_url, err
"Failed to parse OAuth2 JSON response from {}: error: {}",
token_url, err
);
return Ok(None);
}
},
Err(err) => {
warn!(context, "Error calling OAuth2 at {}: {:?}", token_url, err);
warn!("Error calling OAuth2 at {}: {:?}", token_url, err);
return Ok(None);
}
};
@@ -215,14 +213,14 @@ pub(crate) async fn get_oauth2_access_token(
.await?;
}
} else {
warn!(context, "Failed to find OAuth2 access token");
warn!("Failed to find OAuth2 access token");
}
drop(lock);
Ok(response.access_token)
} else {
warn!(context, "Internal OAuth2 error: 2");
warn!("Internal OAuth2 error: 2");
Ok(None)
}
@@ -234,7 +232,7 @@ pub(crate) async fn get_oauth2_addr(
code: &str,
) -> Result<Option<String>> {
let socks5_enabled = context.get_config_bool(Config::Socks5Enabled).await?;
let oauth2 = match Oauth2::from_address(context, addr, socks5_enabled).await {
let oauth2 = match Oauth2::from_address(addr, socks5_enabled).await {
Some(o) => o,
None => return Ok(None),
};
@@ -260,13 +258,13 @@ pub(crate) async fn get_oauth2_addr(
}
impl Oauth2 {
async fn from_address(context: &Context, addr: &str, skip_mx: bool) -> Option<Self> {
async fn from_address(addr: &str, skip_mx: bool) -> Option<Self> {
let addr_normalized = normalize_addr(addr);
if let Some(domain) = addr_normalized
.find('@')
.map(|index| addr_normalized.split_at(index + 1).1)
{
if let Some(oauth2_authorizer) = provider::get_provider_info(context, domain, skip_mx)
if let Some(oauth2_authorizer) = provider::get_provider_info(domain, skip_mx)
.await
.and_then(|provider| provider.oauth2_authorizer.as_ref())
{
@@ -294,14 +292,14 @@ impl Oauth2 {
let client = match crate::http::get_client(socks5_config) {
Ok(cl) => cl,
Err(err) => {
warn!(context, "failed to get HTTP client: {}", err);
warn!("failed to get HTTP client: {}", err);
return None;
}
};
let response = match client.get(userinfo_url).send().await {
Ok(response) => response,
Err(err) => {
warn!(context, "failed to get userinfo: {}", err);
warn!("failed to get userinfo: {}", err);
return None;
}
};
@@ -309,7 +307,7 @@ impl Oauth2 {
let parsed = match response {
Ok(parsed) => parsed,
Err(err) => {
warn!(context, "Error getting userinfo: {}", err);
warn!("Error getting userinfo: {}", err);
return None;
}
};
@@ -319,11 +317,11 @@ impl Oauth2 {
if let Some(s) = addr.as_str() {
Some(s.to_string())
} else {
warn!(context, "E-mail in userinfo is not a string: {}", addr);
warn!("E-mail in userinfo is not a string: {}", addr);
None
}
} else {
warn!(context, "E-mail missing in userinfo.");
warn!("E-mail missing in userinfo.");
None
}
}
@@ -377,39 +375,34 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_oauth_from_address() {
let t = TestContext::new().await;
assert_eq!(
Oauth2::from_address(&t, "hello@gmail.com", false).await,
Oauth2::from_address("hello@gmail.com", false).await,
Some(OAUTH2_GMAIL)
);
assert_eq!(
Oauth2::from_address(&t, "hello@googlemail.com", false).await,
Oauth2::from_address("hello@googlemail.com", false).await,
Some(OAUTH2_GMAIL)
);
assert_eq!(
Oauth2::from_address(&t, "hello@yandex.com", false).await,
Oauth2::from_address("hello@yandex.com", false).await,
Some(OAUTH2_YANDEX)
);
assert_eq!(
Oauth2::from_address(&t, "hello@yandex.ru", false).await,
Oauth2::from_address("hello@yandex.ru", false).await,
Some(OAUTH2_YANDEX)
);
assert_eq!(Oauth2::from_address(&t, "hello@web.de", false).await, None);
assert_eq!(Oauth2::from_address("hello@web.de", false).await, None);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_oauth_from_mx() {
// youtube staff seems to use "google workspace with oauth2", figures this out by MX lookup
let t = TestContext::new().await;
assert_eq!(
Oauth2::from_address(&t, "hello@youtube.com", false).await,
Oauth2::from_address("hello@youtube.com", false).await,
Some(OAUTH2_GMAIL)
);
// without MX lookup, we would not know as youtube.com is not in our provider-db
assert_eq!(
Oauth2::from_address(&t, "hello@youtube.com", true).await,
None
);
assert_eq!(Oauth2::from_address("hello@youtube.com", true).await, None);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]

View File

@@ -4,6 +4,7 @@ use std::collections::HashSet;
use anyhow::{Context as _, Error, Result};
use num_traits::FromPrimitive;
use tracing::warn;
use crate::aheader::{Aheader, EncryptPreference};
use crate::chat::{self, Chat};
@@ -594,10 +595,8 @@ impl Peerstate {
}
Err(err) => {
warn!(
context,
"New address {:?} is not valid, not doing AEAP: {:#}.",
new_addr,
err
new_addr, err
)
}
}

View File

@@ -4,10 +4,10 @@ mod data;
use anyhow::Result;
use chrono::{NaiveDateTime, NaiveTime};
use tracing::warn;
use trust_dns_resolver::{config, AsyncResolver, TokioAsyncResolver};
use crate::config::Config;
use crate::context::Context;
use crate::provider::data::{PROVIDER_DATA, PROVIDER_IDS, PROVIDER_UPDATED};
/// Provider status according to manual testing.
@@ -184,11 +184,7 @@ fn get_resolver() -> Result<TokioAsyncResolver> {
///
/// For compatibility, email address can be passed to this function
/// instead of the domain.
pub async fn get_provider_info(
context: &Context,
domain: &str,
skip_mx: bool,
) -> Option<&'static Provider> {
pub async fn get_provider_info(domain: &str, skip_mx: bool) -> Option<&'static Provider> {
let domain = domain.rsplit('@').next()?;
if let Some(provider) = get_provider_by_domain(domain) {
@@ -196,7 +192,7 @@ pub async fn get_provider_info(
}
if !skip_mx {
if let Some(provider) = get_provider_by_mx(context, domain).await {
if let Some(provider) = get_provider_by_mx(domain).await {
return Some(provider);
}
}
@@ -216,7 +212,7 @@ pub fn get_provider_by_domain(domain: &str) -> Option<&'static Provider> {
/// Finds a provider based on MX record for the given domain.
///
/// For security reasons, only Gmail can be configured this way.
pub async fn get_provider_by_mx(context: &Context, domain: &str) -> Option<&'static Provider> {
pub async fn get_provider_by_mx(domain: &str) -> Option<&'static Provider> {
if let Ok(resolver) = get_resolver() {
let mut fqdn: String = domain.to_string();
if !fqdn.ends_with('.') {
@@ -243,7 +239,7 @@ pub async fn get_provider_by_mx(context: &Context, domain: &str) -> Option<&'sta
}
}
} else {
warn!(context, "cannot get a resolver to check MX records.");
warn!("cannot get a resolver to check MX records.");
}
None
@@ -272,7 +268,6 @@ mod tests {
use chrono::NaiveDate;
use super::*;
use crate::test_utils::TestContext;
use crate::tools::time;
#[test]
@@ -322,13 +317,12 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_get_provider_info() {
let t = TestContext::new().await;
assert!(get_provider_info(&t, "", false).await.is_none());
assert!(get_provider_info(&t, "google.com", false).await.unwrap().id == "gmail");
assert!(get_provider_info("", false).await.is_none());
assert!(get_provider_info("google.com", false).await.unwrap().id == "gmail");
// get_provider_info() accepts email addresses for backwards compatibility
assert!(
get_provider_info(&t, "example@google.com", false)
get_provider_info("example@google.com", false)
.await
.unwrap()
.id

View File

@@ -1513,7 +1513,7 @@ static P_ZOHO: Lazy<Provider> = Lazy::new(|| Provider {
});
pub(crate) static PROVIDER_DATA: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| {
[
HashMap::from([
("163.com", &*P_163),
("aktivix.org", &*P_AKTIVIX_ORG),
("aol.com", &*P_AOL),
@@ -1875,14 +1875,11 @@ pub(crate) static PROVIDER_DATA: Lazy<HashMap<&'static str, &'static Provider>>
("zohomail.eu", &*P_ZOHO),
("zohomail.com", &*P_ZOHO),
("zoho.com", &*P_ZOHO),
]
.iter()
.copied()
.collect()
])
});
pub(crate) static PROVIDER_IDS: Lazy<HashMap<&'static str, &'static Provider>> = Lazy::new(|| {
[
HashMap::from([
("163", &*P_163),
("aktivix.org", &*P_AKTIVIX_ORG),
("aol", &*P_AOL),
@@ -1945,10 +1942,7 @@ pub(crate) static PROVIDER_IDS: Lazy<HashMap<&'static str, &'static Provider>> =
("yggmail", &*P_YGGMAIL),
("ziggo.nl", &*P_ZIGGO_NL),
("zoho", &*P_ZOHO),
]
.iter()
.copied()
.collect()
])
});
pub static PROVIDER_UPDATED: Lazy<chrono::NaiveDate> =

View File

@@ -8,6 +8,7 @@ pub use dclogin_scheme::LoginOptions;
use once_cell::sync::Lazy;
use percent_encoding::percent_decode_str;
use serde::Deserialize;
use tracing::info;
use self::dclogin_scheme::configure_from_login_qr;
use crate::chat::{self, get_chat_id_by_grpid, ChatIdBlocked};
@@ -34,6 +35,7 @@ const VCARD_SCHEME: &str = "BEGIN:VCARD";
const SMTP_SCHEME: &str = "SMTP:";
const HTTP_SCHEME: &str = "http://";
const HTTPS_SCHEME: &str = "https://";
pub(crate) const DCBACKUP_SCHEME: &str = "DCBACKUP:";
/// Scanned QR code.
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -102,6 +104,20 @@ pub enum Qr {
domain: String,
},
/// Provides a backup that can be retrieve.
///
/// This contains all the data needed to connect to a device and download a backup from
/// it to configure the receiving device with the same account.
Backup {
/// Printable version of the provider information.
///
/// This is the printable version of a `sendme` ticket, which contains all the
/// information to connect to and authenticate a backup provider.
///
/// The format is somewhat opaque, but `sendme` can deserialise this.
ticket: iroh::provider::Ticket,
},
/// Ask the user if they want to use the given service for video chats.
WebrtcInstance {
/// Server domain name.
@@ -232,7 +248,7 @@ fn starts_with_ignore_case(string: &str, pattern: &str) -> bool {
/// The function should be called after a QR code is scanned.
/// The function takes the raw text scanned and checks what can be done with it.
pub async fn check_qr(context: &Context, qr: &str) -> Result<Qr> {
info!(context, "Scanned QR code: {}", qr);
info!("Scanned QR code: {}", qr);
let qrcode = if starts_with_ignore_case(qr, OPENPGP4FPR_SCHEME) {
decode_openpgp(context, qr)
@@ -244,6 +260,8 @@ pub async fn check_qr(context: &Context, qr: &str) -> Result<Qr> {
dclogin_scheme::decode_login(qr)?
} else if starts_with_ignore_case(qr, DCWEBRTC_SCHEME) {
decode_webrtc_instance(context, qr)?
} else if starts_with_ignore_case(qr, DCBACKUP_SCHEME) {
decode_backup(qr)?
} else if qr.starts_with(MAILTO_SCHEME) {
decode_mailto(context, qr).await?
} else if qr.starts_with(SMTP_SCHEME) {
@@ -264,6 +282,19 @@ pub async fn check_qr(context: &Context, qr: &str) -> Result<Qr> {
Ok(qrcode)
}
/// Formats the text of the [`Qr::Backup`] variant.
///
/// This is the inverse of [`check_qr`] for that variant only.
///
/// TODO: Refactor this so all variants have a correct [`Display`] and transform `check_qr`
/// into `FromStr`.
pub fn format_backup(qr: &Qr) -> Result<String> {
match qr {
Qr::Backup { ref ticket } => Ok(format!("{DCBACKUP_SCHEME}{ticket}")),
_ => Err(anyhow!("Not a backup QR code")),
}
}
/// scheme: `OPENPGP4FPR:FINGERPRINT#a=ADDR&n=NAME&i=INVITENUMBER&s=AUTH`
/// or: `OPENPGP4FPR:FINGERPRINT#a=ADDR&g=GROUPNAME&x=GROUPID&i=INVITENUMBER&s=AUTH`
/// or: `OPENPGP4FPR:FINGERPRINT#a=ADDR`
@@ -471,6 +502,18 @@ fn decode_webrtc_instance(_context: &Context, qr: &str) -> Result<Qr> {
}
}
/// Decodes a [`DCBACKUP_SCHEME`] QR code.
///
/// The format of this scheme is `DCBACKUP:<encoded ticket>`. The encoding is the
/// [`iroh::provider::Ticket`]'s `Display` impl.
fn decode_backup(qr: &str) -> Result<Qr> {
let payload = qr
.strip_prefix(DCBACKUP_SCHEME)
.ok_or(anyhow!("invalid DCBACKUP scheme"))?;
let ticket: iroh::provider::Ticket = payload.parse().context("invalid DCBACKUP payload")?;
Ok(Qr::Backup { ticket })
}
#[derive(Debug, Deserialize)]
struct CreateAccountSuccessResponse {
/// Email address.

View File

@@ -11,7 +11,9 @@ use crate::{
config::Config,
contact::{Contact, ContactId},
context::Context,
securejoin, stock_str,
qr::{self, Qr},
securejoin,
stock_str::{self, backup_transfer_qr},
};
/// Returns SVG of the QR code to join the group or verify contact.
@@ -47,6 +49,34 @@ async fn generate_join_group_qr_code(context: &Context, chat_id: ChatId) -> Resu
}
async fn generate_verification_qr(context: &Context) -> Result<String> {
let (avatar, displayname, addr, color) = self_info(context).await?;
inner_generate_secure_join_qr_code(
&stock_str::setup_contact_qr_description(context, &displayname, &addr).await,
&securejoin::get_securejoin_qr(context, None).await?,
&color,
avatar,
displayname.chars().next().unwrap_or('#'),
)
}
/// Renders a [`Qr::Backup`] QR code as an SVG image.
pub async fn generate_backup_qr(context: &Context, qr: &Qr) -> Result<String> {
let content = qr::format_backup(qr)?;
let (avatar, displayname, _addr, color) = self_info(context).await?;
let description = backup_transfer_qr(context).await?;
inner_generate_secure_join_qr_code(
&description,
&content,
&color,
avatar,
displayname.chars().next().unwrap_or('#'),
)
}
/// Returns `(avatar, displayname, addr, color) of the configured account.
async fn self_info(context: &Context) -> Result<(Option<Vec<u8>>, String, String, String)> {
let contact = Contact::get_by_id(context, ContactId::SELF).await?;
let avatar = match contact.get_profile_image(context).await? {
@@ -59,16 +89,11 @@ async fn generate_verification_qr(context: &Context) -> Result<String> {
let displayname = match context.get_config(Config::Displayname).await? {
Some(name) => name,
None => contact.get_addr().to_owned(),
None => contact.get_addr().to_string(),
};
inner_generate_secure_join_qr_code(
&stock_str::setup_contact_qr_description(context, &displayname, contact.get_addr()).await,
&securejoin::get_securejoin_qr(context, None).await?,
&color_int_to_hex_string(contact.get_color()),
avatar,
displayname.chars().next().unwrap_or('#'),
)
let addr = contact.get_addr().to_string();
let color = color_int_to_hex_string(contact.get_color());
Ok((avatar, displayname, addr, color))
}
fn inner_generate_secure_join_qr_code(
@@ -272,6 +297,12 @@ fn inner_generate_secure_join_qr_code(
#[cfg(test)]
mod tests {
use testdir::testdir;
use crate::imex::BackupProvider;
use crate::qr::format_backup;
use crate::test_utils::TestContextManager;
use super::*;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -286,4 +317,20 @@ mod tests {
.unwrap();
assert!(svg.contains("descr123 &quot; &lt; &gt; &amp;"))
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_generate_backup_qr() {
let dir = testdir!();
let mut tcm = TestContextManager::new();
let ctx = tcm.alice().await;
let provider = BackupProvider::prepare(&ctx).await.unwrap();
let qr = provider.qr();
println!("{}", format_backup(&qr).unwrap());
let rendered = generate_backup_qr(&ctx, &qr).await.unwrap();
tokio::fs::write(dir.join("qr.svg"), &rendered)
.await
.unwrap();
assert_eq!(rendered.get(..4), Some("<svg"));
}
}

View File

@@ -5,6 +5,7 @@ use std::sync::atomic::Ordering;
use anyhow::{anyhow, Context as _, Result};
use async_imap::types::{Quota, QuotaResource};
use tracing::warn;
use crate::chat::add_device_msg_with_importance;
use crate::config::Config;
@@ -115,7 +116,9 @@ impl Context {
let requested = self.quota_update_request.swap(true, Ordering::Relaxed);
if !requested {
// Quota update was not requested before.
self.interrupt_inbox(InterruptInfo::new(false)).await;
self.scheduler
.interrupt_inbox(InterruptInfo::new(false))
.await;
}
Ok(())
}
@@ -132,7 +135,7 @@ impl Context {
/// Called in response to `Action::UpdateRecentQuota`.
pub(crate) async fn update_recent_quota(&self, imap: &mut Imap) -> Result<()> {
if let Err(err) = imap.prepare(self).await {
warn!(self, "could not connect: {:#}", err);
warn!("could not connect: {err:#}");
return Ok(());
}
@@ -160,7 +163,7 @@ impl Context {
self.set_config(Config::QuotaExceeding, None).await?;
}
}
Err(err) => warn!(self, "cannot get highest quota usage: {:#}", err),
Err(err) => warn!("cannot get highest quota usage: {:#}", err),
}
}

View File

@@ -18,6 +18,7 @@ use std::collections::BTreeMap;
use std::fmt;
use anyhow::Result;
use tracing::info;
use crate::chat::{send_msg, ChatId};
use crate::contact::ContactId;
@@ -232,8 +233,8 @@ pub(crate) async fn set_msg_reaction(
set_msg_id_reaction(context, msg_id, chat_id, contact_id, reaction).await
} else {
info!(
context,
"Can't assign reaction to unknown message with Message-ID {}", in_reply_to
"Can't assign reaction to unknown message with Message-ID {}",
in_reply_to
);
Ok(())
}

View File

@@ -9,6 +9,7 @@ use mailparse::{parse_mail, SingleInfo};
use num_traits::FromPrimitive;
use once_cell::sync::Lazy;
use regex::Regex;
use tracing::{info, warn};
use crate::chat::{self, Chat, ChatId, ChatIdBlocked, ProtectionStatus};
use crate::config::Config;
@@ -96,11 +97,10 @@ pub(crate) async fn receive_imf_inner(
is_partial_download: Option<u32>,
fetching_existing_messages: bool,
) -> Result<Option<ReceivedMsg>> {
info!(context, "Receiving message, seen={}...", seen);
info!("Receiving message, seen={seen}...");
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(
context,
"receive_imf: incoming message mime-body:\n{}",
String::from_utf8_lossy(imf_raw),
);
@@ -109,7 +109,7 @@ pub(crate) async fn receive_imf_inner(
let mut mime_parser = match MimeMessage::from_bytes(context, imf_raw, is_partial_download).await
{
Err(err) => {
warn!(context, "receive_imf: can't parse MIME: {:#}", err);
warn!("receive_imf: can't parse MIME: {err:#}.");
let msg_ids;
if !rfc724_mid.starts_with(GENERATED_PREFIX) {
let row_id = context
@@ -138,11 +138,11 @@ pub(crate) async fn receive_imf_inner(
// we can not add even an empty record if we have no info whatsoever
if !mime_parser.has_headers() {
warn!(context, "receive_imf: no headers found");
warn!("receive_imf: no headers found.");
return Ok(None);
}
info!(context, "received message has Message-Id: {}", rfc724_mid);
info!("Received message has Message-Id: {rfc724_mid}");
// check, if the mail is already in our database.
// make sure, this check is done eg. before securejoin-processing.
@@ -151,14 +151,11 @@ pub(crate) async fn receive_imf_inner(
let msg = Message::load_from_db(context, old_msg_id).await?;
if msg.download_state() != DownloadState::Done && is_partial_download.is_none() {
// the message was partially downloaded before and is fully downloaded now.
info!(
context,
"Message already partly in DB, replacing by full message."
);
info!("Message already partly in DB, replacing by full message.");
Some(old_msg_id)
} else {
// the message was probably moved around.
info!(context, "Message already in DB, doing nothing.");
info!("Message already in DB, doing nothing.");
return Ok(None);
}
} else {
@@ -180,10 +177,7 @@ pub(crate) async fn receive_imf_inner(
match from_field_to_contact_id(context, &mime_parser.from, prevent_rename).await? {
Some(contact_id_res) => contact_id_res,
None => {
warn!(
context,
"receive_imf: From field does not contain an acceptable address"
);
warn!("receive_imf: From field does not contain an acceptable address.");
return Ok(None);
}
};
@@ -246,10 +240,7 @@ pub(crate) async fn receive_imf_inner(
.iter()
.all(|recipient| mime_parser.gossiped_addr.contains(&recipient.addr))
{
info!(
context,
"Received message contains Autocrypt-Gossip for all members, updating timestamp."
);
info!("Received message contains Autocrypt-Gossip for all members, updating timestamp.");
if chat_id.get_gossiped_timestamp(context).await? < sent_timestamp {
chat_id
.set_gossiped_timestamp(context, sent_timestamp)
@@ -269,13 +260,13 @@ pub(crate) async fn receive_imf_inner(
if from_id == ContactId::SELF {
if mime_parser.was_encrypted() {
if let Err(err) = context.execute_sync_items(sync_items).await {
warn!(context, "receive_imf cannot execute sync items: {:#}", err);
warn!("receive_imf cannot execute sync items: {err:#}.");
}
} else {
warn!(context, "sync items are not encrypted.");
warn!("Sync items are not encrypted.");
}
} else {
warn!(context, "sync items not sent by self.");
warn!("Sync items not sent by self.");
}
}
@@ -284,7 +275,7 @@ pub(crate) async fn receive_imf_inner(
.receive_status_update(from_id, insert_msg_id, status_update)
.await
{
warn!(context, "receive_imf cannot update status: {:#}", err);
warn!("receive_imf cannot update status: {err:#}.");
}
}
@@ -302,10 +293,7 @@ pub(crate) async fn receive_imf_inner(
)
.await
{
warn!(
context,
"receive_imf cannot update profile image: {:#}", err
);
warn!("receive_imf cannot update profile image: {err:#}.");
};
}
}
@@ -331,7 +319,7 @@ pub(crate) async fn receive_imf_inner(
)
.await
{
warn!(context, "cannot update contact status: {:#}", err);
warn!("Cannot update contact status: {err:#}.");
}
}
@@ -392,10 +380,7 @@ pub async fn from_field_to_contact_id(
let from_addr = match ContactAddress::new(&from.addr) {
Ok(from_addr) => from_addr,
Err(err) => {
warn!(
context,
"Cannot create a contact for the given From field: {:#}.", err
);
warn!("Cannot create a contact for the given From field: {err:#}.");
return Ok(None);
}
};
@@ -476,7 +461,7 @@ async fn add_parts(
// this message is a classic email not a chat-message nor a reply to one
match show_emails {
ShowEmails::Off => {
info!(context, "Classical email not shown (TRASH)");
info!("Classical email not shown (TRASH).");
chat_id = Some(DC_CHAT_ID_TRASH);
allow_creation = false;
}
@@ -519,7 +504,7 @@ async fn add_parts(
securejoin_seen = false;
}
Err(err) => {
warn!(context, "Error in Secure-Join message handling: {:#}", err);
warn!("Error in Secure-Join message handling: {err:#}.");
chat_id = Some(DC_CHAT_ID_TRASH);
securejoin_seen = true;
}
@@ -536,7 +521,7 @@ async fn add_parts(
if chat_id.is_none() && mime_parser.delivery_report.is_some() {
chat_id = Some(DC_CHAT_ID_TRASH);
info!(context, "Message is a DSN (TRASH)",);
info!("Message is a DSN (TRASH).",);
}
if chat_id.is_none() {
@@ -710,10 +695,7 @@ async fn add_parts(
// the contact requests will pop up and this should be just fine.
Contact::scaleup_origin_by_id(context, from_id, Origin::IncomingReplyTo)
.await?;
info!(
context,
"Message is a reply to a known message, mark sender as known.",
);
info!("Message is a reply to a known message, mark sender as known.",);
}
}
}
@@ -754,7 +736,7 @@ async fn add_parts(
chat_id = None;
}
Err(err) => {
warn!(context, "Error in Secure-Join watching: {:#}", err);
warn!("Error in Secure-Join watching: {err:#}.");
chat_id = Some(DC_CHAT_ID_TRASH);
}
}
@@ -771,7 +753,7 @@ async fn add_parts(
if is_draft {
// Most mailboxes have a "Drafts" folder where constantly new emails appear but we don't actually want to show them
info!(context, "Email is probably just a draft (TRASH)");
info!("Email is probably just a draft (TRASH).");
chat_id = Some(DC_CHAT_ID_TRASH);
}
@@ -863,14 +845,14 @@ async fn add_parts(
if fetching_existing_messages && mime_parser.decrypting_failed {
chat_id = Some(DC_CHAT_ID_TRASH);
// We are only gathering old messages on first start. We do not want to add loads of non-decryptable messages to the chats.
info!(context, "Existing non-decipherable message. (TRASH)");
info!("Existing non-decipherable message (TRASH).");
}
if mime_parser.webxdc_status_update.is_some() && mime_parser.parts.len() == 1 {
if let Some(part) = mime_parser.parts.first() {
if part.typ == Viewtype::Text && part.msg.is_empty() {
chat_id = Some(DC_CHAT_ID_TRASH);
info!(context, "Message is a status update only (TRASH)");
info!("Message is a status update only (TRASH).");
}
}
}
@@ -880,7 +862,7 @@ async fn add_parts(
DC_CHAT_ID_TRASH
} else {
chat_id.unwrap_or_else(|| {
info!(context, "No chat id for message (TRASH)");
info!("No chat id for message (TRASH).");
DC_CHAT_ID_TRASH
})
};
@@ -892,10 +874,7 @@ async fn add_parts(
match value.parse::<EphemeralTimer>() {
Ok(timer) => timer,
Err(err) => {
warn!(
context,
"can't parse ephemeral timer \"{}\": {:#}", value, err
);
warn!("Can't parse ephemeral timer \"{value}\": {err:#}.");
EphemeralTimer::Disabled
}
}
@@ -915,12 +894,7 @@ async fn add_parts(
&& !mime_parser.parts.is_empty()
&& chat_id.get_ephemeral_timer(context).await? != ephemeral_timer
{
info!(
context,
"received new ephemeral timer value {:?} for chat {}, checking if it should be applied",
ephemeral_timer,
chat_id
);
info!("Received new ephemeral timer value {ephemeral_timer:?} for chat {chat_id}, checking if it should be applied.");
if is_dc_message == MessengerMessage::Yes
&& get_previous_message(context, mime_parser)
.await?
@@ -935,10 +909,7 @@ async fn add_parts(
// value is different, it means the sender has not received some timer update that we
// have seen or sent ourselves, so we ignore incoming timer to prevent a rollback.
warn!(
context,
"ignoring ephemeral timer change to {:?} for chat {} to avoid rollback",
ephemeral_timer,
chat_id
"Ignoring ephemeral timer change to {ephemeral_timer:?} for chat {chat_id} to avoid rollback.",
);
} else if chat_id
.update_timestamp(context, Param::EphemeralSettingsTimestamp, sent_timestamp)
@@ -948,15 +919,9 @@ async fn add_parts(
.inner_set_ephemeral_timer(context, ephemeral_timer)
.await
{
warn!(
context,
"failed to modify timer for chat {}: {:#}", chat_id, err
);
warn!("Failed to modify timer for chat {chat_id}: {err:#}.");
} else {
info!(
context,
"updated ephemeral timer to {:?} for chat {}", ephemeral_timer, chat_id
);
info!("Updated ephemeral timer to {ephemeral_timer:?} for chat {chat_id}.");
if mime_parser.is_system_message != SystemMessage::EphemeralTimerChanged {
chat::add_info_msg(
context,
@@ -968,10 +933,7 @@ async fn add_parts(
}
}
} else {
warn!(
context,
"ignoring ephemeral timer change to {:?} because it's outdated", ephemeral_timer
);
warn!("Ignoring ephemeral timer change to {ephemeral_timer:?} because it is outdated.");
}
}
@@ -999,7 +961,7 @@ async fn add_parts(
if chat.is_protected() || new_status.is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await
{
warn!(context, "verification problem: {:#}", err);
warn!("Verification problem: {err:#}.");
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
} else {
@@ -1256,10 +1218,7 @@ SET rfc724_mid=excluded.rfc724_mid, chat_id=excluded.chat_id,
chat_id.unarchive_if_not_muted(context, state).await?;
info!(
context,
"Message has {icnt} parts and is assigned to chat #{chat_id}."
);
info!("Message has {icnt} parts and is assigned to chat #{chat_id}.");
// new outgoing message from another device marks the chat as noticed.
if !incoming && !chat_id.is_special() {
@@ -1340,7 +1299,6 @@ async fn save_locations(
}
} else {
warn!(
context,
"Address in location.kml {:?} is not the same as the sender address {:?}.",
addr,
contact.get_addr()
@@ -1428,8 +1386,8 @@ async fn lookup_chat_by_reply(
}
info!(
context,
"Assigning message to {} as it's a reply to {}", parent_chat.id, parent.rfc724_mid
"Assigning message to {} as it's a reply to {}.",
parent_chat.id, parent.rfc724_mid
);
return Ok(Some((parent_chat.id, parent_chat.blocked)));
}
@@ -1499,7 +1457,7 @@ async fn create_or_lookup_group(
.map(|chat_id| (chat_id, create_blocked));
return Ok(res);
} else {
info!(context, "creating ad-hoc group prevented from caller");
info!("Creating ad-hoc group prevented from caller.");
return Ok(None);
};
@@ -1525,7 +1483,7 @@ async fn create_or_lookup_group(
let create_protected = if mime_parser.get_header(HeaderDef::ChatVerified).is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await {
warn!(context, "verification problem: {:#}", err);
warn!("Verification problem: {err:#}.");
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
}
@@ -1557,7 +1515,7 @@ async fn create_or_lookup_group(
{
// Group does not exist but should be created.
if !allow_creation {
info!(context, "creating group forbidden by caller");
info!("Creating group forbidden by caller.");
return Ok(None);
}
@@ -1602,6 +1560,7 @@ async fn create_or_lookup_group(
// .await?;
//}
info!("Chat {} is created.", new_chat_id);
context.emit_event(EventType::ChatModified(new_chat_id));
}
@@ -1618,7 +1577,7 @@ async fn create_or_lookup_group(
} else {
// The message was decrypted successfully, but contains a late "quit" or otherwise
// unwanted message.
info!(context, "message belongs to unwanted group (TRASH)");
info!("Message belongs to unwanted group (TRASH).");
Ok(Some((DC_CHAT_ID_TRASH, Blocked::Not)))
}
}
@@ -1658,7 +1617,7 @@ async fn apply_group_changes(
Some(stock_str::msg_del_member(context, &removed_addr, from_id).await)
};
}
None => warn!(context, "removed {:?} has no contact_id", removed_addr),
None => warn!("Removed {removed_addr:?} has no contact_id."),
}
} else {
removed_id = None;
@@ -1683,7 +1642,7 @@ async fn apply_group_changes(
.update_timestamp(context, Param::GroupNameTimestamp, sent_timestamp)
.await?
{
info!(context, "updating grpname for chat {}", chat_id);
info!("Updating grpname for chat {chat_id}.");
context
.sql
.execute(
@@ -1723,7 +1682,7 @@ async fn apply_group_changes(
if mime_parser.get_header(HeaderDef::ChatVerified).is_some() {
if let Err(err) = check_verified_properties(context, mime_parser, from_id, to_ids).await {
warn!(context, "verification problem: {:#}", err);
warn!("Verification problem: {err:#}.");
let s = format!("{err}. See 'Info' for more details");
mime_parser.repl_msg_by_error(&s);
}
@@ -1742,10 +1701,7 @@ async fn apply_group_changes(
&& !chat::is_contact_in_chat(context, chat_id, from_id).await?
{
warn!(
context,
"Contact {} attempts to modify group chat {} member list without being a member.",
from_id,
chat_id
"Contact {from_id} attempts to modify group chat {chat_id} member list without being a member."
);
} else if chat_id
.update_timestamp(context, Param::MemberListTimestamp, sent_timestamp)
@@ -1778,10 +1734,7 @@ async fn apply_group_changes(
}
members_to_add.dedup();
info!(
context,
"adding {:?} to chat id={}", members_to_add, chat_id
);
info!("Adding {members_to_add:?} to chat id={chat_id}.");
chat::add_to_chat_contacts_table(context, chat_id, &members_to_add).await?;
send_event_chat_modified = true;
}
@@ -1789,19 +1742,13 @@ async fn apply_group_changes(
if let Some(avatar_action) = &mime_parser.group_avatar {
if !chat::is_contact_in_chat(context, chat_id, ContactId::SELF).await? {
warn!(
context,
"Received group avatar update for group chat {} we are not a member of.", chat_id
);
warn!("Received group avatar update for group chat {chat_id} we are not a member of.");
} else if !chat::is_contact_in_chat(context, chat_id, from_id).await? {
warn!(
context,
"Contact {} attempts to modify group chat {} avatar without being a member.",
from_id,
chat_id
"Contact {from_id} attempts to modify group chat {chat_id} avatar without being a member.",
);
} else {
info!(context, "group-avatar change for {}", chat_id);
info!("Group-avatar change for {chat_id}.");
if chat
.param
.update_timestamp(Param::AvatarTimestamp, sent_timestamp)?
@@ -1935,7 +1882,7 @@ async fn create_or_lookup_mailinglist(
.await
.with_context(|| {
format!(
"Failed to create mailinglist '{}' for grpid={}",
"failed to create mailinglist '{}' for grpid={}",
&name, &listid
)
})?;
@@ -1943,7 +1890,7 @@ async fn create_or_lookup_mailinglist(
chat::add_to_chat_contacts_table(context, chat_id, &[ContactId::SELF]).await?;
Ok(Some((chat_id, blocked)))
} else {
info!(context, "creating list forbidden by caller");
info!("Creating list forbidden by caller.");
Ok(None)
}
}
@@ -1966,7 +1913,7 @@ async fn apply_mailinglist_changes(
let list_post = match ContactAddress::new(list_post) {
Ok(list_post) => list_post,
Err(err) => {
warn!(context, "Invalid List-Post: {:#}.", err);
warn!("Invalid List-Post: {:#}.", err);
return Ok(());
}
};
@@ -2033,10 +1980,7 @@ async fn create_adhoc_group(
member_ids: &[ContactId],
) -> Result<Option<ChatId>> {
if mime_parser.is_mailinglist_message() {
info!(
context,
"not creating ad-hoc group for mailing list message"
);
info!("Not creating ad-hoc group for mailing list message.");
return Ok(None);
}
@@ -2050,15 +1994,12 @@ async fn create_adhoc_group(
// Chat-Group-ID and incompatible Message-ID format.
//
// Instead, assign the message to 1:1 chat with the sender.
warn!(
context,
"not creating ad-hoc group for message that cannot be decrypted"
);
warn!("Not creating ad-hoc group for message that cannot be decrypted.");
return Ok(None);
}
if member_ids.len() < 3 {
info!(context, "not creating ad-hoc group: too few contacts");
info!("Not creating ad-hoc group: too few contacts.");
return Ok(None);
}
@@ -2098,11 +2039,7 @@ async fn check_verified_properties(
// we do not fail here currently, this would exclude (a) non-deltas
// and (b) deltas with different protection views across multiple devices.
// for group creation or protection enabled/disabled, however, Chat-Verified is respected.
warn!(
context,
"{} did not mark message as protected.",
contact.get_addr()
);
warn!("{} did not mark message as protected.", contact.get_addr());
}
// ensure, the contact is verified
@@ -2164,8 +2101,7 @@ async fn check_verified_properties(
for (to_addr, mut is_verified) in rows {
info!(
context,
"check_verified_properties: {:?} self={:?}",
"check_verified_properties: {:?} self={:?}.",
to_addr,
context.is_self_addr(&to_addr).await
);
@@ -2183,7 +2119,7 @@ async fn check_verified_properties(
|| peerstate.verified_key_fingerprint != peerstate.public_key_fingerprint
&& peerstate.verified_key_fingerprint != peerstate.gossip_key_fingerprint
{
info!(context, "{} has verified {}.", contact.get_addr(), to_addr);
info!("{} has verified {}.", contact.get_addr(), to_addr);
let fp = peerstate.gossip_key_fingerprint.clone();
if let Some(fp) = fp {
peerstate.set_verified(
@@ -2315,7 +2251,7 @@ async fn add_or_lookup_contacts_by_address_list(
add_or_lookup_contact_by_addr(context, display_name, addr, origin).await?;
contact_ids.insert(contact_id);
} else {
warn!(context, "Contact with address {:?} cannot exist.", addr);
warn!("Contact with address {:?} cannot exist.", addr);
}
}

View File

@@ -1,4 +1,6 @@
use tokio::fs;
use tracing_futures::WithSubscriber;
use tracing_subscriber::{prelude::*, registry::Registry};
use super::*;
use crate::aheader::EncryptPreference;
@@ -1340,8 +1342,12 @@ async fn test_apply_mailinglist_changes_assigned_by_reply() {
let chat = Chat::load_from_db(&t, chat_id).await.unwrap();
assert!(chat.can_send(&t).await.unwrap());
let subscriber = Registry::default().with(t.to_layer());
let imf_raw = format!("In-Reply-To: 3333@example.org\n{GH_MAILINGLIST2}");
receive_imf(&t, imf_raw.as_bytes(), false).await.unwrap();
receive_imf(&t, imf_raw.as_bytes(), false)
.with_subscriber(subscriber)
.await
.unwrap();
assert_eq!(
t.get_last_msg().await.in_reply_to.unwrap(),

View File

@@ -5,13 +5,19 @@ use anyhow::{bail, Context as _, Result};
use async_channel::{self as channel, Receiver, Sender};
use futures::future::try_join_all;
use futures_lite::FutureExt;
use tokio::sync::{oneshot, RwLock, RwLockWriteGuard};
use tokio::task;
use tracing::instrument::Instrument;
use tracing::{error, info, warn};
use tracing_futures::WithSubscriber;
use self::connectivity::ConnectivityStore;
use crate::config::Config;
use crate::contact::{ContactId, RecentlySeenLoop};
use crate::context::future::ContextIdFutureExt;
use crate::context::Context;
use crate::ephemeral::{self, delete_expired_imap_messages};
use crate::events::EventType;
use crate::imap::{FolderMeaning, Imap};
use crate::job;
use crate::location;
@@ -23,10 +29,207 @@ use crate::tools::{duration_to_str, maybe_add_time_based_warnings};
pub(crate) mod connectivity;
/// State of the IO scheduler, as stored on the [`Context`].
///
/// The IO scheduler can be stopped or started, but core can also pause it. After pausing
/// the IO scheduler will be restarted only if it was running before paused or
/// [`Context::start_io`] was called in the meantime while it was paused.
#[derive(Debug, Default)]
pub(crate) struct SchedulerState {
inner: RwLock<InnerSchedulerState>,
}
impl SchedulerState {
pub(crate) fn new() -> Self {
Default::default()
}
/// Whether the scheduler is currently running.
pub(crate) async fn is_running(&self) -> bool {
let inner = self.inner.read().await;
inner.scheduler.is_some()
}
/// Starts the scheduler if it is not yet started.
pub(crate) async fn start(&self, context: Context) {
let mut inner = self.inner.write().await;
inner.started = true;
if inner.scheduler.is_none() && !inner.paused {
Self::do_start(inner, context).await;
}
}
/// Starts the scheduler if it is not yet started.
async fn do_start(mut inner: RwLockWriteGuard<'_, InnerSchedulerState>, context: Context) {
info!("Starting IO.");
match Scheduler::start(context).await {
Ok(scheduler) => inner.scheduler = Some(scheduler),
Err(err) => error!("Failed to start IO: {err:#}."),
}
}
/// Stops the scheduler if it is currently running.
pub(crate) async fn stop(&self, context: &Context) {
let mut inner = self.inner.write().await;
inner.started = false;
Self::do_stop(inner, context).await;
}
/// Stops the scheduler if it is currently running.
async fn do_stop(mut inner: RwLockWriteGuard<'_, InnerSchedulerState>, context: &Context) {
// Sending an event wakes up event pollers (get_next_event)
// so the caller of stop_io() can arrange for proper termination.
// For this, the caller needs to instruct the event poller
// to terminate on receiving the next event and then call stop_io()
// which will emit the below event(s)
context.emit_event(EventType::Info("Stopping IO.".to_string()));
if let Some(debug_logging) = context.debug_logging.read().await.as_ref() {
debug_logging.loop_handle.abort();
}
if let Some(scheduler) = inner.scheduler.take() {
scheduler.stop(context).await;
}
}
/// Pauses the IO scheduler.
///
/// If it is currently running the scheduler will be stopped. When the
/// [`IoPausedGuard`] is dropped the scheduler is started again.
///
/// If in the meantime [`SchedulerState::start`] or [`SchedulerState::stop`] is called
/// resume will do the right thing and restore the scheduler to the state requested by
/// the last call.
pub(crate) async fn pause<'a>(&'_ self, context: Context) -> IoPausedGuard {
{
let mut inner = self.inner.write().await;
inner.paused = true;
Self::do_stop(inner, &context).await;
}
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
rx.await.ok();
let mut inner = context.scheduler.inner.write().await;
inner.paused = false;
if inner.started && inner.scheduler.is_none() {
SchedulerState::do_start(inner, context.clone()).await;
}
});
IoPausedGuard { sender: Some(tx) }
}
/// Restarts the scheduler, only if it is running.
pub(crate) async fn restart(&self, context: &Context) {
info!("Restarting IO.");
if self.is_running().await {
self.stop(context).await;
self.start(context.clone()).await;
}
}
/// Indicate that the network likely has come back.
pub(crate) async fn maybe_network(&self) {
let inner = self.inner.read().await;
let (inbox, oboxes) = match inner.scheduler {
Some(ref scheduler) => {
scheduler.maybe_network();
let inbox = scheduler.inbox.conn_state.state.connectivity.clone();
let oboxes = scheduler
.oboxes
.iter()
.map(|b| b.conn_state.state.connectivity.clone())
.collect::<Vec<_>>();
(inbox, oboxes)
}
None => return,
};
drop(inner);
connectivity::idle_interrupted(inbox, oboxes).await;
}
/// Indicate that the network likely is lost.
pub(crate) async fn maybe_network_lost(&self, context: &Context) {
let inner = self.inner.read().await;
let stores = match inner.scheduler {
Some(ref scheduler) => {
scheduler.maybe_network_lost();
scheduler
.boxes()
.map(|b| b.conn_state.state.connectivity.clone())
.collect()
}
None => return,
};
drop(inner);
connectivity::maybe_network_lost(context, stores).await;
}
pub(crate) async fn interrupt_inbox(&self, info: InterruptInfo) {
let inner = self.inner.read().await;
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_inbox(info);
}
}
pub(crate) async fn interrupt_smtp(&self, info: InterruptInfo) {
let inner = self.inner.read().await;
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_smtp(info);
}
}
pub(crate) async fn interrupt_ephemeral_task(&self) {
let inner = self.inner.read().await;
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_ephemeral_task();
}
}
pub(crate) async fn interrupt_location(&self) {
let inner = self.inner.read().await;
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_location();
}
}
pub(crate) async fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
let inner = self.inner.read().await;
if let Some(ref scheduler) = inner.scheduler {
scheduler.interrupt_recently_seen(contact_id, timestamp);
}
}
}
#[derive(Debug, Default)]
struct InnerSchedulerState {
scheduler: Option<Scheduler>,
started: bool,
paused: bool,
}
/// Guard to make sure the IO Scheduler is resumed.
///
/// Returned by [`SchedulerState::pause`]. To resume the IO scheduler simply drop this
/// guard.
#[derive(Debug)]
pub(crate) struct IoPausedGuard {
sender: Option<oneshot::Sender<()>>,
}
impl Drop for IoPausedGuard {
fn drop(&mut self) {
if let Some(sender) = self.sender.take() {
// Can only fail if receiver is dropped, but then we're already resumed.
sender.send(()).ok();
}
}
}
#[derive(Debug)]
struct SchedBox {
meaning: FolderMeaning,
conn_state: ImapConnectionState,
/// IMAP loop task handle.
handle: task::JoinHandle<()>,
}
@@ -46,60 +249,10 @@ pub(crate) struct Scheduler {
recently_seen_loop: RecentlySeenLoop,
}
impl Context {
/// Indicate that the network likely has come back.
pub async fn maybe_network(&self) {
let lock = self.scheduler.read().await;
if let Some(scheduler) = &*lock {
scheduler.maybe_network();
}
connectivity::idle_interrupted(lock).await;
}
/// Indicate that the network likely is lost.
pub async fn maybe_network_lost(&self) {
let lock = self.scheduler.read().await;
if let Some(scheduler) = &*lock {
scheduler.maybe_network_lost();
}
connectivity::maybe_network_lost(self, lock).await;
}
pub(crate) async fn interrupt_inbox(&self, info: InterruptInfo) {
if let Some(scheduler) = &*self.scheduler.read().await {
scheduler.interrupt_inbox(info);
}
}
pub(crate) async fn interrupt_smtp(&self, info: InterruptInfo) {
if let Some(scheduler) = &*self.scheduler.read().await {
scheduler.interrupt_smtp(info);
}
}
pub(crate) async fn interrupt_ephemeral_task(&self) {
if let Some(scheduler) = &*self.scheduler.read().await {
scheduler.interrupt_ephemeral_task();
}
}
pub(crate) async fn interrupt_location(&self) {
if let Some(scheduler) = &*self.scheduler.read().await {
scheduler.interrupt_location();
}
}
pub(crate) async fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
if let Some(scheduler) = &*self.scheduler.read().await {
scheduler.interrupt_recently_seen(contact_id, timestamp);
}
}
}
async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConnectionHandlers) {
use futures::future::FutureExt;
info!(ctx, "starting inbox loop");
info!("starting inbox loop");
let ImapConnectionHandlers {
mut connection,
stop_receiver,
@@ -109,7 +262,7 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
let fut = async move {
let ctx = ctx1;
if let Err(err) = started.send(()).await {
warn!(ctx, "inbox loop, missing started receiver: {}", err);
warn!("Inbox loop, missing started receiver: {err:#}.");
return;
};
@@ -117,7 +270,7 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
loop {
let job = match job::load_next(&ctx, &info).await {
Err(err) => {
error!(ctx, "Failed loading job from the database: {:#}.", err);
error!("Failed loading job from the database: {err:#}.");
None
}
Ok(job) => job,
@@ -132,14 +285,14 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
let quota_requested = ctx.quota_update_request.swap(false, Ordering::Relaxed);
if quota_requested {
if let Err(err) = ctx.update_recent_quota(&mut connection).await {
warn!(ctx, "Failed to update quota: {:#}.", err);
warn!("Failed to update quota: {err:#}.");
}
}
let resync_requested = ctx.resync_request.swap(false, Ordering::Relaxed);
if resync_requested {
if let Err(err) = connection.resync_folders(&ctx).await {
warn!(ctx, "Failed to resync folders: {:#}.", err);
warn!("Failed to resync folders: {err:#}.");
ctx.resync_request.store(true, Ordering::Relaxed);
}
}
@@ -155,7 +308,7 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
}
}
Err(err) => {
warn!(ctx, "Failed to get last housekeeping time: {}", err);
warn!("Failed to get last housekeeping time: {err:#}.");
}
};
@@ -169,17 +322,17 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
if let Err(err) =
ctx.set_config_bool(Config::FetchedExistingMsgs, true).await
{
warn!(ctx, "Can't set Config::FetchedExistingMsgs: {:#}", err);
warn!("Can't set Config::FetchedExistingMsgs: {:#}", err);
}
if let Err(err) = connection.fetch_existing_msgs(&ctx).await {
warn!(ctx, "Failed to fetch existing messages: {:#}", err);
connection.trigger_reconnect(&ctx);
warn!("Failed to fetch existing messages: {:#}", err);
connection.trigger_reconnect();
}
}
}
Err(err) => {
warn!(ctx, "Can't get Config::FetchedExistingMsgs: {:#}", err);
warn!("Can't get Config::FetchedExistingMsgs: {:#}", err);
}
}
@@ -192,7 +345,7 @@ async fn inbox_loop(ctx: Context, started: Sender<()>, inbox_handlers: ImapConne
stop_receiver
.recv()
.map(|_| {
info!(ctx, "shutting down inbox loop");
info!("shutting down inbox loop");
})
.race(fut)
.await;
@@ -212,7 +365,7 @@ async fn fetch_idle(
let folder_config = match folder_meaning.to_config() {
Some(c) => c,
None => {
error!(ctx, "Bad folder meaning: {}", folder_meaning);
error!("Bad folder meaning: {}", folder_meaning);
return connection
.fake_idle(ctx, None, FolderMeaning::Unknown)
.await;
@@ -222,8 +375,8 @@ async fn fetch_idle(
Ok(folder) => folder,
Err(err) => {
warn!(
ctx,
"Can not watch {} folder, failed to retrieve config: {:#}", folder_config, err
"Can not watch {} folder, failed to retrieve config: {:#}",
folder_config, err
);
return connection
.fake_idle(ctx, None, FolderMeaning::Unknown)
@@ -235,7 +388,7 @@ async fn fetch_idle(
watch_folder
} else {
connection.connectivity.set_not_configured(ctx).await;
info!(ctx, "Can not watch {} folder, not set", folder_config);
info!("Can not watch {folder_config} folder, not set");
return connection
.fake_idle(ctx, None, FolderMeaning::Unknown)
.await;
@@ -247,8 +400,8 @@ async fn fetch_idle(
.await
.context("prepare IMAP connection")
{
warn!(ctx, "{:#}", err);
connection.trigger_reconnect(ctx);
warn!("{err:#}");
connection.trigger_reconnect();
return connection
.fake_idle(ctx, Some(watch_folder), folder_meaning)
.await;
@@ -262,7 +415,7 @@ async fn fetch_idle(
.context("store_seen_flags_on_imap")
.ok_or_log(ctx);
} else {
warn!(ctx, "No session even though we just prepared it");
warn!("No session even though we just prepared it");
}
}
@@ -272,8 +425,8 @@ async fn fetch_idle(
.await
.context("fetch_move_delete")
{
connection.trigger_reconnect(ctx);
warn!(ctx, "{:#}", err);
connection.trigger_reconnect();
warn!("{:#}", err);
return InterruptInfo::new(false);
}
@@ -296,7 +449,7 @@ async fn fetch_idle(
Err(err) => {
// Don't reconnect, if there is a problem with the connection we will realize this when IDLEing
// but maybe just one folder can't be selected or something
warn!(ctx, "{:#}", err);
warn!("{:#}", err);
}
Ok(true) => {
// Fetch the watched folder again in case scanning other folder moved messages
@@ -310,8 +463,8 @@ async fn fetch_idle(
.await
.context("fetch_move_delete after scan_folders")
{
connection.trigger_reconnect(ctx);
warn!(ctx, "{:#}", err);
connection.trigger_reconnect();
warn!("{:#}", err);
return InterruptInfo::new(false);
}
}
@@ -328,21 +481,18 @@ async fn fetch_idle(
connection.connectivity.set_connected(ctx).await;
ctx.emit_event(EventType::ImapInboxIdle);
if let Some(session) = connection.session.take() {
if !session.can_idle() {
info!(
ctx,
"IMAP session does not support IDLE, going to fake idle."
);
info!("IMAP session does not support IDLE, going to fake idle.");
return connection
.fake_idle(ctx, Some(watch_folder), folder_meaning)
.await;
}
info!(ctx, "IMAP session supports IDLE, using it.");
info!("IMAP session supports IDLE, using it.");
match session
.idle(
ctx,
connection.idle_interrupt_receiver.clone(),
Some(watch_folder),
)
@@ -354,13 +504,13 @@ async fn fetch_idle(
info
}
Err(err) => {
connection.trigger_reconnect(ctx);
warn!(ctx, "{:#}", err);
connection.trigger_reconnect();
warn!("{:#}", err);
InterruptInfo::new(false)
}
}
} else {
warn!(ctx, "No IMAP session, going to fake idle.");
warn!("No IMAP session, going to fake idle.");
connection
.fake_idle(ctx, Some(watch_folder), folder_meaning)
.await
@@ -375,7 +525,7 @@ async fn simple_imap_loop(
) {
use futures::future::FutureExt;
info!(ctx, "starting simple loop for {}", folder_meaning);
info!("starting simple loop for {folder_meaning}");
let ImapConnectionHandlers {
mut connection,
stop_receiver,
@@ -386,7 +536,7 @@ async fn simple_imap_loop(
let fut = async move {
let ctx = ctx1;
if let Err(err) = started.send(()).await {
warn!(&ctx, "simple imap loop, missing started receiver: {}", err);
warn!("simple imap loop, missing started receiver: {}", err);
return;
}
@@ -398,7 +548,7 @@ async fn simple_imap_loop(
stop_receiver
.recv()
.map(|_| {
info!(ctx, "shutting down simple loop");
info!("shutting down simple loop");
})
.race(fut)
.await;
@@ -407,7 +557,7 @@ async fn simple_imap_loop(
async fn smtp_loop(ctx: Context, started: Sender<()>, smtp_handlers: SmtpConnectionHandlers) {
use futures::future::FutureExt;
info!(ctx, "starting smtp loop");
info!("starting smtp loop");
let SmtpConnectionHandlers {
mut connection,
stop_receiver,
@@ -418,20 +568,19 @@ async fn smtp_loop(ctx: Context, started: Sender<()>, smtp_handlers: SmtpConnect
let fut = async move {
let ctx = ctx1;
if let Err(err) = started.send(()).await {
warn!(&ctx, "smtp loop, missing started receiver: {}", err);
warn!("smtp loop, missing started receiver: {}", err);
return;
}
let mut timeout = None;
loop {
if let Err(err) = send_smtp_messages(&ctx, &mut connection).await {
warn!(ctx, "send_smtp_messages failed: {:#}", err);
warn!("send_smtp_messages failed: {:#}", err);
timeout = Some(timeout.map_or(30, |timeout: u64| timeout.saturating_mul(3)))
} else {
let duration_until_can_send = ctx.ratelimit.read().await.until_can_send();
if !duration_until_can_send.is_zero() {
info!(
ctx,
"smtp got rate limited, waiting for {} until can send again",
duration_to_str(duration_until_can_send)
);
@@ -446,7 +595,7 @@ async fn smtp_loop(ctx: Context, started: Sender<()>, smtp_handlers: SmtpConnect
}
// Fake Idle
info!(ctx, "smtp fake idle - started");
info!("smtp fake idle - started");
match &connection.last_send_error {
None => connection.connectivity.set_connected(&ctx).await,
Some(err) => connection.connectivity.set_err(&ctx, err).await,
@@ -458,8 +607,8 @@ async fn smtp_loop(ctx: Context, started: Sender<()>, smtp_handlers: SmtpConnect
// unnecessary retries.
if let Some(timeout) = timeout {
info!(
ctx,
"smtp has messages to retry, planning to retry {} seconds later", timeout
"smtp has messages to retry, planning to retry {} seconds later",
timeout
);
let duration = std::time::Duration::from_secs(timeout);
tokio::time::timeout(duration, async {
@@ -468,18 +617,18 @@ async fn smtp_loop(ctx: Context, started: Sender<()>, smtp_handlers: SmtpConnect
.await
.unwrap_or_default();
} else {
info!(ctx, "smtp has no messages to retry, waiting for interrupt");
info!("smtp has no messages to retry, waiting for interrupt");
idle_interrupt_receiver.recv().await.unwrap_or_default();
};
info!(ctx, "smtp fake idle - interrupted")
info!("smtp fake idle - interrupted")
}
};
stop_receiver
.recv()
.map(|_| {
info!(ctx, "shutting down smtp loop");
info!("shutting down smtp loop");
})
.race(fut)
.await;
@@ -501,7 +650,12 @@ impl Scheduler {
let (inbox_start_send, inbox_start_recv) = channel::bounded(1);
let handle = {
let ctx = ctx.clone();
task::spawn(inbox_loop(ctx, inbox_start_send, inbox_handlers))
task::spawn(
inbox_loop(ctx, inbox_start_send, inbox_handlers)
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
)
};
let inbox = SchedBox {
meaning: FolderMeaning::Inbox,
@@ -521,7 +675,12 @@ impl Scheduler {
let (conn_state, handlers) = ImapConnectionState::new(&ctx).await?;
let (start_send, start_recv) = channel::bounded(1);
let ctx = ctx.clone();
let handle = task::spawn(simple_imap_loop(ctx, start_send, handlers, meaning));
let handle = task::spawn(
simple_imap_loop(ctx, start_send, handlers, meaning)
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
);
oboxes.push(SchedBox {
meaning,
conn_state,
@@ -533,22 +692,37 @@ impl Scheduler {
let smtp_handle = {
let ctx = ctx.clone();
task::spawn(smtp_loop(ctx, smtp_start_send, smtp_handlers))
task::spawn(
smtp_loop(ctx, smtp_start_send, smtp_handlers)
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
)
};
start_recvs.push(smtp_start_recv);
let ephemeral_handle = {
let ctx = ctx.clone();
task::spawn(async move {
ephemeral::ephemeral_loop(&ctx, ephemeral_interrupt_recv).await;
})
task::spawn(
async move {
ephemeral::ephemeral_loop(&ctx, ephemeral_interrupt_recv).await;
}
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
)
};
let location_handle = {
let ctx = ctx.clone();
task::spawn(async move {
location::location_loop(&ctx, location_interrupt_recv).await;
})
task::spawn(
async move {
location::location_loop(&ctx, location_interrupt_recv).await;
}
.with_current_subscriber()
.bind_current_context_id()
.in_current_span(),
)
};
let recently_seen_loop = RecentlySeenLoop::new(ctx.clone());
@@ -570,7 +744,7 @@ impl Scheduler {
bail!("failed to start scheduler: {}", err);
}
info!(ctx, "scheduler is running");
info!("scheduler is running");
Ok(res)
}

View File

@@ -3,7 +3,8 @@ use std::{iter::once, ops::Deref, sync::Arc};
use anyhow::{anyhow, Result};
use humansize::{format_size, BINARY};
use tokio::sync::{Mutex, RwLockReadGuard};
use tokio::sync::Mutex;
use tracing::info;
use crate::events::EventType;
use crate::imap::{scan_folders::get_watched_folder_configs, FolderMeaning};
@@ -12,7 +13,7 @@ use crate::quota::{
};
use crate::tools::time;
use crate::{context::Context, log::LogExt};
use crate::{scheduler::Scheduler, stock_str, tools};
use crate::{stock_str, tools};
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumProperty, PartialOrd, Ord)]
pub enum Connectivity {
@@ -156,19 +157,7 @@ impl ConnectivityStore {
/// Set all folder states to InterruptingIdle in case they were `Connected` before.
/// Called during `dc_maybe_network()` to make sure that `dc_accounts_all_work_done()`
/// returns false immediately after `dc_maybe_network()`.
pub(crate) async fn idle_interrupted(scheduler: RwLockReadGuard<'_, Option<Scheduler>>) {
let (inbox, oboxes) = match &*scheduler {
Some(Scheduler { inbox, oboxes, .. }) => (
inbox.conn_state.state.connectivity.clone(),
oboxes
.iter()
.map(|b| b.conn_state.state.connectivity.clone())
.collect::<Vec<_>>(),
),
None => return,
};
drop(scheduler);
pub(crate) async fn idle_interrupted(inbox: ConnectivityStore, oboxes: Vec<ConnectivityStore>) {
let mut connectivity_lock = inbox.0.lock().await;
// For the inbox, we also have to set the connectivity to InterruptingIdle if it was
// NotConfigured before: If all folders are NotConfigured, dc_get_connectivity()
@@ -195,19 +184,7 @@ pub(crate) async fn idle_interrupted(scheduler: RwLockReadGuard<'_, Option<Sched
/// Set the connectivity to "Not connected" after a call to dc_maybe_network_lost().
/// If we did not do this, the connectivity would stay "Connected" for quite a long time
/// after `maybe_network_lost()` was called.
pub(crate) async fn maybe_network_lost(
context: &Context,
scheduler: RwLockReadGuard<'_, Option<Scheduler>>,
) {
let stores: Vec<_> = match &*scheduler {
Some(sched) => sched
.boxes()
.map(|b| b.conn_state.state.connectivity.clone())
.collect(),
None => return,
};
drop(scheduler);
pub(crate) async fn maybe_network_lost(context: &Context, stores: Vec<ConnectivityStore>) {
for store in &stores {
let mut connectivity_lock = store.0.lock().await;
if !matches!(
@@ -249,9 +226,9 @@ impl Context {
///
/// If the connectivity changes, a DC_EVENT_CONNECTIVITY_CHANGED will be emitted.
pub async fn get_connectivity(&self) -> Connectivity {
let lock = self.scheduler.read().await;
let stores: Vec<_> = match &*lock {
Some(sched) => sched
let lock = self.scheduler.inner.read().await;
let stores: Vec<_> = match lock.scheduler {
Some(ref sched) => sched
.boxes()
.map(|b| b.conn_state.state.connectivity.clone())
.collect(),
@@ -332,9 +309,9 @@ impl Context {
// Get the states from the RwLock
// =============================================================================================
let lock = self.scheduler.read().await;
let (folders_states, smtp) = match &*lock {
Some(sched) => (
let lock = self.scheduler.inner.read().await;
let (folders_states, smtp) = match lock.scheduler {
Some(ref sched) => (
sched
.boxes()
.map(|b| (b.meaning, b.conn_state.state.connectivity.clone()))
@@ -432,7 +409,7 @@ impl Context {
ret +=
&format!("<b>{}:</b> ", &*escaper::encode_minimal(root_name));
} else {
info!(self, "connectivity: root name hidden: \"{}\"", root_name);
info!("connectivity: root name hidden: \"{}\"", root_name);
}
let messages = stock_str::messages(self).await;
@@ -503,9 +480,9 @@ impl Context {
/// Returns true if all background work is done.
pub async fn all_work_done(&self) -> bool {
let lock = self.scheduler.read().await;
let stores: Vec<_> = match &*lock {
Some(sched) => sched
let lock = self.scheduler.inner.read().await;
let stores: Vec<_> = match lock.scheduler {
Some(ref sched) => sched
.boxes()
.map(|b| &b.conn_state.state)
.chain(once(&sched.smtp.state))

View File

@@ -4,6 +4,7 @@ use std::convert::TryFrom;
use anyhow::{bail, Context as _, Error, Result};
use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC};
use tracing::{error, info, warn};
use crate::aheader::EncryptPreference;
use crate::chat::{self, Chat, ChatId, ChatIdBlocked};
@@ -124,7 +125,7 @@ pub async fn get_securejoin_qr(context: &Context, group: Option<ChatId>) -> Resu
)
};
info!(context, "Generated QR code: {}", qr);
info!("Generated QR code: {}", qr);
Ok(qr)
}
@@ -133,7 +134,7 @@ async fn get_self_fingerprint(context: &Context) -> Option<Fingerprint> {
match SignedPublicKey::load_self(context).await {
Ok(key) => Some(key.fingerprint()),
Err(_) => {
warn!(context, "get_self_fingerprint(): failed to load key");
warn!("get_self_fingerprint(): failed to load key");
None
}
}
@@ -147,9 +148,9 @@ async fn get_self_fingerprint(context: &Context) -> Option<Fingerprint> {
/// The function returns immediately and the handshake will run in background.
pub async fn join_securejoin(context: &Context, qr: &str) -> Result<ChatId> {
securejoin(context, qr).await.map_err(|err| {
warn!(context, "Fatal joiner error: {:#}", err);
warn!("Fatal joiner error: {:#}", err);
// The user just scanned this QR code so has context on what failed.
error!(context, "QR process failed");
error!("QR process failed");
err
})
}
@@ -160,7 +161,7 @@ async fn securejoin(context: &Context, qr: &str) -> Result<ChatId> {
==== Step 2 in "Setup verified contact" protocol =====
========================================================*/
info!(context, "Requesting secure-join ...",);
info!("Requesting secure-join ...",);
let qr_scan = check_qr(context, qr).await?;
let invite = QrInvite::try_from(qr_scan)?;
@@ -215,7 +216,6 @@ async fn fingerprint_equals_sender(
Ok(peerstate) => peerstate,
Err(err) => {
warn!(
context,
"Failed to sender peerstate for {}: {}",
contact.get_addr(),
err
@@ -288,8 +288,8 @@ pub(crate) async fn handle_securejoin_handshake(
.context("Not a Secure-Join message")?;
info!(
context,
">>>>>>>>>>>>>>>>>>>>>>>>> secure-join message \'{}\' received", step,
">>>>>>>>>>>>>>>>>>>>>>>>> secure-join message \'{}\' received",
step,
);
let join_vg = step.starts_with("vg-");
@@ -308,15 +308,15 @@ pub(crate) async fn handle_securejoin_handshake(
let invitenumber = match mime_message.get_header(HeaderDef::SecureJoinInvitenumber) {
Some(n) => n,
None => {
warn!(context, "Secure-join denied (invitenumber missing)");
warn!("Secure-join denied (invitenumber missing)");
return Ok(HandshakeMessage::Ignore);
}
};
if !token::exists(context, token::Namespace::InviteNumber, invitenumber).await {
warn!(context, "Secure-join denied (bad invitenumber).");
warn!("Secure-join denied (bad invitenumber).");
return Ok(HandshakeMessage::Ignore);
}
info!(context, "Secure-join requested.",);
info!("Secure-join requested.",);
inviter_progress!(context, contact_id, 300);
@@ -366,7 +366,7 @@ pub(crate) async fn handle_securejoin_handshake(
return Ok(HandshakeMessage::Ignore);
}
};
if !encrypted_and_signed(context, mime_message, Some(&fingerprint)) {
if !encrypted_and_signed(mime_message, Some(&fingerprint)) {
could_not_establish_secure_connection(
context,
contact_id,
@@ -386,7 +386,7 @@ pub(crate) async fn handle_securejoin_handshake(
.await?;
return Ok(HandshakeMessage::Ignore);
}
info!(context, "Fingerprint verified.",);
info!("Fingerprint verified.",);
// verify that the `Secure-Join-Auth:`-header matches the secret written to the QR code
let auth_0 = match mime_message.get_header(HeaderDef::SecureJoinAuth) {
Some(auth) => auth,
@@ -429,7 +429,7 @@ pub(crate) async fn handle_securejoin_handshake(
return Ok(HandshakeMessage::Ignore);
}
Contact::scaleup_origin_by_id(context, contact_id, Origin::SecurejoinInvited).await?;
info!(context, "Auth verified.",);
info!("Auth verified.",);
context.emit_event(EventType::ContactsChanged(Some(contact_id)));
inviter_progress!(context, contact_id, 600);
if join_vg {
@@ -439,7 +439,7 @@ pub(crate) async fn handle_securejoin_handshake(
let field_grpid = match mime_message.get_header(HeaderDef::SecureJoinGroup) {
Some(s) => s.as_str(),
None => {
warn!(context, "Missing Secure-Join-Group header");
warn!("Missing Secure-Join-Group header");
return Ok(HandshakeMessage::Ignore);
}
};
@@ -450,7 +450,7 @@ pub(crate) async fn handle_securejoin_handshake(
chat::add_contact_to_chat_ex(context, group_chat_id, contact_id, true)
.await
{
error!(context, "failed to add contact: {}", err);
error!("failed to add contact: {}", err);
}
}
None => bail!("Chat {} not found", &field_grpid),
@@ -501,7 +501,7 @@ pub(crate) async fn handle_securejoin_handshake(
if let Ok(contact) = Contact::get_by_id(context, contact_id).await {
if contact.is_verified(context).await? == VerifiedStatus::Unverified {
warn!(context, "{} invalid.", step);
warn!("{} invalid.", step);
return Ok(HandshakeMessage::Ignore);
}
if join_vg {
@@ -510,7 +510,7 @@ pub(crate) async fn handle_securejoin_handshake(
.map(|s| s.as_str())
.unwrap_or_else(|| "");
if let Err(err) = chat::get_chat_id_by_grpid(context, field_grpid).await {
warn!(context, "Failed to lookup chat_id from grpid: {}", err);
warn!("Failed to lookup chat_id from grpid: {}", err);
return Err(
err.context(format!("Chat for group {} not found", &field_grpid))
);
@@ -518,12 +518,12 @@ pub(crate) async fn handle_securejoin_handshake(
}
Ok(HandshakeMessage::Ignore) // "Done" deletes the message and breaks multi-device
} else {
warn!(context, "{} invalid.", step);
warn!("{} invalid.", step);
Ok(HandshakeMessage::Ignore)
}
}
_ => {
warn!(context, "invalid step: {}", step);
warn!("invalid step: {}", step);
Ok(HandshakeMessage::Ignore)
}
}
@@ -557,7 +557,7 @@ pub(crate) async fn observe_securejoin_on_other_device(
let step = mime_message
.get_header(HeaderDef::SecureJoin)
.context("Not a Secure-Join message")?;
info!(context, "observing secure-join message \'{}\'", step);
info!("observing secure-join message \'{}\'", step);
match step.as_str() {
"vg-request-with-auth"
@@ -566,11 +566,7 @@ pub(crate) async fn observe_securejoin_on_other_device(
| "vc-contact-confirm"
| "vg-member-added-received"
| "vc-contact-confirm-received" => {
if !encrypted_and_signed(
context,
mime_message,
get_self_fingerprint(context).await.as_ref(),
) {
if !encrypted_and_signed(mime_message, get_self_fingerprint(context).await.as_ref()) {
could_not_establish_secure_connection(
context,
contact_id,
@@ -715,8 +711,8 @@ async fn could_not_establish_secure_connection(
let msg = stock_str::contact_not_verified(context, &contact).await;
chat::add_info_msg(context, chat_id, &msg, time()).await?;
warn!(
context,
"StockMessage::ContactNotVerified posted to 1:1 chat ({})", details
"StockMessage::ContactNotVerified posted to 1:1 chat ({})",
details
);
Ok(())
}
@@ -733,7 +729,7 @@ async fn mark_peer_as_verified(
PeerstateVerifiedStatus::BidirectVerified,
verifier,
) {
error!(context, "Could not mark peer as verified: {}", err);
error!("Could not mark peer as verified: {}", err);
return Err(err);
}
peerstate.prefer_encrypt = EncryptPreference::Mutual;
@@ -749,25 +745,24 @@ async fn mark_peer_as_verified(
******************************************************************************/
fn encrypted_and_signed(
context: &Context,
mimeparser: &MimeMessage,
expected_fingerprint: Option<&Fingerprint>,
) -> bool {
if !mimeparser.was_encrypted() {
warn!(context, "Message not encrypted.",);
warn!("Message not encrypted.",);
false
} else if let Some(expected_fingerprint) = expected_fingerprint {
if !mimeparser.signatures.contains(expected_fingerprint) {
warn!(
context,
"Message does not match expected fingerprint {}.", expected_fingerprint,
"Message does not match expected fingerprint {}.",
expected_fingerprint,
);
false
} else {
true
}
} else {
warn!(context, "Fingerprint for comparison missing.");
warn!("Fingerprint for comparison missing.");
false
}
}

View File

@@ -4,6 +4,7 @@
//! the required user interactions.
use anyhow::{Context as _, Result};
use tracing::{error, warn};
use super::bobstate::{BobHandshakeStage, BobState};
use super::qrinvite::QrInvite;
@@ -45,7 +46,7 @@ pub(super) async fn start_protocol(context: &Context, invite: QrInvite) -> Resul
let (state, stage, aborted_states) =
BobState::start_protocol(context, invite.clone(), chat_id).await?;
for state in aborted_states {
error!(context, "Aborting previously unfinished QR Join process.");
error!("Aborting previously unfinished QR Join process.");
state.notify_aborted(context, "new QR scanned").await?;
state.emit_progress(context, JoinerProgress::Error);
}
@@ -133,10 +134,7 @@ pub(super) async fn handle_contact_confirm(
Ok(retval)
}
Some(_) => {
warn!(
context,
"Impossible state returned from handling handshake message"
);
warn!("Impossible state returned from handling handshake message");
Ok(retval)
}
None => Ok(retval),
@@ -208,8 +206,8 @@ impl BobState {
let chat_id = self.joining_chat_id(context).await?;
chat::add_info_msg(context, chat_id, &msg, time()).await?;
warn!(
context,
"StockMessage::ContactNotVerified posted to joining chat ({})", why
"StockMessage::ContactNotVerified posted to joining chat ({})",
why
);
Ok(())
}

View File

@@ -9,6 +9,7 @@
use anyhow::{Error, Result};
use rusqlite::Connection;
use tracing::{info, warn};
use super::qrinvite::QrInvite;
use super::{encrypted_and_signed, fingerprint_equals_sender, mark_peer_as_verified};
@@ -96,7 +97,7 @@ impl BobState {
if fingerprint_equals_sender(context, invite.fingerprint(), invite.contact_id()).await?
{
// The scanned fingerprint matches Alice's key, we can proceed to step 4b.
info!(context, "Taking securejoin protocol shortcut");
info!("Taking securejoin protocol shortcut");
send_handshake_message(context, &invite, chat_id, BobHandshakeMsg::RequestWithAuth)
.await?;
(
@@ -245,15 +246,14 @@ impl BobState {
Some(step) => step,
None => {
warn!(
context,
"Message has no Secure-Join header: {}",
mime_message.get_rfc724_mid().unwrap_or_default()
);
return Ok(None);
}
};
if !self.is_msg_expected(context, step.as_str()) {
info!(context, "{} message out of sync for BobState", step);
if !self.is_msg_expected(step.as_str()) {
info!("{} message out of sync for BobState", step);
return Ok(None);
}
match step.as_str() {
@@ -264,19 +264,19 @@ impl BobState {
self.step_contact_confirm(context, mime_message).await
}
_ => {
warn!(context, "Invalid step for BobState: {}", step);
warn!("Invalid step for BobState: {}", step);
Ok(None)
}
}
}
/// Returns `true` if the message is expected according to the protocol.
fn is_msg_expected(&self, context: &Context, step: &str) -> bool {
fn is_msg_expected(&self, step: &str) -> bool {
let variant_matches = match self.invite {
QrInvite::Contact { .. } => step.starts_with("vc-"),
QrInvite::Group { .. } => step.starts_with("vg-"),
};
let step_matches = self.next.matches(context, step);
let step_matches = self.next.matches(step);
variant_matches && step_matches
}
@@ -289,11 +289,8 @@ impl BobState {
context: &Context,
mime_message: &MimeMessage,
) -> Result<Option<BobHandshakeStage>> {
info!(
context,
"Bob Step 4 - handling vc-auth-require/vg-auth-required message"
);
if !encrypted_and_signed(context, mime_message, Some(self.invite.fingerprint())) {
info!("Bob Step 4 - handling vc-auth-require/vg-auth-required message");
if !encrypted_and_signed(mime_message, Some(self.invite.fingerprint())) {
let reason = if mime_message.was_encrypted() {
"Valid signature missing"
} else {
@@ -310,7 +307,7 @@ impl BobState {
.await?;
return Ok(Some(BobHandshakeStage::Terminated("Fingerprint mismatch")));
}
info!(context, "Fingerprint verified.",);
info!("Fingerprint verified.",);
self.update_next(&context.sql, SecureJoinStep::ContactConfirm)
.await?;
self.send_handshake_message(context, BobHandshakeMsg::RequestWithAuth)
@@ -331,10 +328,7 @@ impl BobState {
context: &Context,
mime_message: &MimeMessage,
) -> Result<Option<BobHandshakeStage>> {
info!(
context,
"Bob Step 7 - handling vc-contact-confirm/vg-member-added message"
);
info!("Bob Step 7 - handling vc-contact-confirm/vg-member-added message");
let vg_expect_encrypted = match self.invite {
QrInvite::Contact { .. } => {
// setup-contact is always encrypted
@@ -357,7 +351,7 @@ impl BobState {
}
};
if vg_expect_encrypted
&& !encrypted_and_signed(context, mime_message, Some(self.invite.fingerprint()))
&& !encrypted_and_signed(mime_message, Some(self.invite.fingerprint()))
{
self.update_next(&context.sql, SecureJoinStep::Terminated)
.await?;
@@ -381,7 +375,7 @@ impl BobState {
.map(|s| s.as_str())
.ok_or_else(|| Error::msg("Missing Chat-Group-Member-Added header"))?;
if !context.is_self_addr(member_added).await? {
info!(context, "Message belongs to a different handshake (scaled up contact anyway to allow creation of group).");
info!( "Message belongs to a different handshake (scaled up contact anyway to allow creation of group).");
return Ok(None);
}
}
@@ -389,10 +383,7 @@ impl BobState {
self.send_handshake_message(context, BobHandshakeMsg::ContactConfirmReceived)
.await
.map_err(|_| {
warn!(
context,
"Failed to send vc-contact-confirm-received/vg-member-added-received"
);
warn!("Failed to send vc-contact-confirm-received/vg-member-added-received");
})
// This is not an error affecting the protocol outcome.
.ok();
@@ -530,16 +521,16 @@ pub enum SecureJoinStep {
impl SecureJoinStep {
/// Compares the legacy string representation of a step to a [`SecureJoinStep`] variant.
fn matches(&self, context: &Context, step: &str) -> bool {
fn matches(&self, step: &str) -> bool {
match self {
Self::AuthRequired => step == "vc-auth-required" || step == "vg-auth-required",
Self::ContactConfirm => step == "vc-contact-confirm" || step == "vg-member-added",
SecureJoinStep::Terminated => {
warn!(context, "Terminated state for next securejoin step");
warn!("Terminated state for next securejoin step");
false
}
SecureJoinStep::Completed => {
warn!(context, "Completed state for next securejoin step");
warn!("Completed state for next securejoin step");
false
}
}

View File

@@ -7,8 +7,9 @@ use std::time::{Duration, SystemTime};
use anyhow::{bail, format_err, Context as _, Error, Result};
use async_smtp::response::{Category, Code, Detail};
use async_smtp::{self as smtp, EmailAddress, SmtpTransport};
use tokio::io::BufWriter;
use tokio::io::BufStream;
use tokio::task;
use tracing::{error, info, warn};
use crate::config::Config;
use crate::contact::{Contact, ContactId};
@@ -18,7 +19,7 @@ use crate::message::Message;
use crate::message::{self, MsgId};
use crate::mimefactory::MimeFactory;
use crate::net::connect_tcp;
use crate::net::session::SessionStream;
use crate::net::session::SessionBufStream;
use crate::net::tls::wrap_tls;
use crate::oauth2::get_oauth2_access_token;
use crate::provider::Socket;
@@ -32,7 +33,7 @@ const SMTP_TIMEOUT: Duration = Duration::from_secs(30);
#[derive(Default)]
pub(crate) struct Smtp {
/// SMTP connection.
transport: Option<SmtpTransport<Box<dyn SessionStream>>>,
transport: Option<SmtpTransport<Box<dyn SessionBufStream>>>,
/// Email address we are sending from.
from: Option<EmailAddress>,
@@ -87,7 +88,7 @@ impl Smtp {
/// Connect using configured parameters.
pub async fn connect_configured(&mut self, context: &Context) -> Result<()> {
if self.has_maybe_stale_connection() {
info!(context, "Closing stale connection");
info!("Closing stale connection");
self.disconnect().await;
}
@@ -116,13 +117,13 @@ impl Smtp {
port: u16,
strict_tls: bool,
socks5_config: Socks5Config,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let socks5_stream = socks5_config
.connect(context, hostname, port, SMTP_TIMEOUT, strict_tls)
.await?;
let tls_stream = wrap_tls(strict_tls, hostname, socks5_stream).await?;
let buffered_stream = BufWriter::new(tls_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(tls_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -135,20 +136,20 @@ impl Smtp {
port: u16,
strict_tls: bool,
socks5_config: Socks5Config,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let socks5_stream = socks5_config
.connect(context, hostname, port, SMTP_TIMEOUT, strict_tls)
.await?;
// Run STARTTLS command and convert the client back into a stream.
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, socks5_stream).await?;
let tcp_stream = transport.starttls().await?;
let transport = SmtpTransport::new(client, BufStream::new(socks5_stream)).await?;
let tcp_stream = transport.starttls().await?.into_inner();
let tls_stream = wrap_tls(strict_tls, hostname, tcp_stream)
.await
.context("STARTTLS upgrade failed")?;
let buffered_stream = BufWriter::new(tls_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(tls_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true).without_greeting();
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -160,12 +161,12 @@ impl Smtp {
hostname: &str,
port: u16,
socks5_config: Socks5Config,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let socks5_stream = socks5_config
.connect(context, hostname, port, SMTP_TIMEOUT, false)
.await?;
let buffered_stream = BufWriter::new(socks5_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(socks5_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -177,11 +178,11 @@ impl Smtp {
hostname: &str,
port: u16,
strict_tls: bool,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let tcp_stream = connect_tcp(context, hostname, port, SMTP_TIMEOUT, false).await?;
let tls_stream = wrap_tls(strict_tls, hostname, tcp_stream).await?;
let buffered_stream = BufWriter::new(tls_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(tls_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -193,18 +194,18 @@ impl Smtp {
hostname: &str,
port: u16,
strict_tls: bool,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let tcp_stream = connect_tcp(context, hostname, port, SMTP_TIMEOUT, strict_tls).await?;
// Run STARTTLS command and convert the client back into a stream.
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, tcp_stream).await?;
let tcp_stream = transport.starttls().await?;
let transport = SmtpTransport::new(client, BufStream::new(tcp_stream)).await?;
let tcp_stream = transport.starttls().await?.into_inner();
let tls_stream = wrap_tls(strict_tls, hostname, tcp_stream)
.await
.context("STARTTLS upgrade failed")?;
let buffered_stream = BufWriter::new(tls_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(tls_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true).without_greeting();
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -215,10 +216,10 @@ impl Smtp {
context: &Context,
hostname: &str,
port: u16,
) -> Result<SmtpTransport<Box<dyn SessionStream>>> {
) -> Result<SmtpTransport<Box<dyn SessionBufStream>>> {
let tcp_stream = connect_tcp(context, hostname, port, SMTP_TIMEOUT, false).await?;
let buffered_stream = BufWriter::new(tcp_stream);
let session_stream: Box<dyn SessionStream> = Box::new(buffered_stream);
let buffered_stream = BufStream::new(tcp_stream);
let session_stream: Box<dyn SessionBufStream> = Box::new(buffered_stream);
let client = smtp::SmtpClient::new().smtp_utf8(true);
let transport = SmtpTransport::new(client, session_stream).await?;
Ok(transport)
@@ -234,7 +235,7 @@ impl Smtp {
provider_strict_tls: bool,
) -> Result<()> {
if self.is_connected() {
warn!(context, "SMTP already connected.");
warn!("SMTP already connected.");
return Ok(());
}
@@ -364,7 +365,7 @@ pub(crate) async fn smtp_send(
msg_id: MsgId,
) -> SendResult {
if std::env::var(crate::DCC_MIME_DEBUG).is_ok() {
info!(context, "smtp-sending out mime message:");
info!("smtp-sending out mime message:");
println!("{message}");
}
@@ -385,7 +386,7 @@ pub(crate) async fn smtp_send(
let status = match send_result {
Err(crate::smtp::send::Error::SmtpSend(err)) => {
// Remote error, retry later.
info!(context, "SMTP failed to send: {:?}", &err);
info!("SMTP failed to send: {:?}", &err);
let res = match err {
async_smtp::error::Error::Permanent(ref response) => {
@@ -412,10 +413,10 @@ pub(crate) async fn smtp_send(
};
if maybe_transient {
info!(context, "Permanent error that is likely to actually be transient, postponing retry for later");
info!( "Permanent error that is likely to actually be transient, postponing retry for later");
SendResult::Retry
} else {
info!(context, "Permanent error, message sending failed");
info!("Permanent error, message sending failed");
// If we do not retry, add an info message to the chat.
// Yandex error "554 5.7.1 [2] Message rejected under suspicion of SPAM; https://ya.cc/..."
// should definitely go here, because user has to open the link to
@@ -436,27 +437,22 @@ pub(crate) async fn smtp_send(
// Any extended smtp status codes like x.1.1, x.1.2 or x.1.3 that we
// receive as a transient error are misconfigurations of the smtp server.
// See <https://tools.ietf.org/html/rfc3463#section-3.2>
info!(context, "Received extended status code {} for a transient error. This looks like a misconfigured SMTP server, let's fail immediately", first_word);
info!("Received extended status code {} for a transient error. This looks like a misconfigured SMTP server, let's fail immediately", first_word);
SendResult::Failure(format_err!("Permanent SMTP error: {}", err))
} else {
info!(
context,
"Transient error with status code {}, postponing retry for later",
first_word
);
SendResult::Retry
}
} else {
info!(
context,
"Transient error without status code, postponing retry for later"
);
info!("Transient error without status code, postponing retry for later");
SendResult::Retry
}
}
_ => {
info!(
context,
"Message sending failed without error returned by the server, retry later"
);
SendResult::Retry
@@ -464,7 +460,7 @@ pub(crate) async fn smtp_send(
};
// this clears last_success info
info!(context, "Failed to send message over SMTP, disconnecting");
info!("Failed to send message over SMTP, disconnecting");
smtp.disconnect().await;
res
@@ -472,19 +468,19 @@ pub(crate) async fn smtp_send(
Err(crate::smtp::send::Error::Envelope(err)) => {
// Local error, job is invalid, do not retry.
smtp.disconnect().await;
warn!(context, "SMTP job is invalid: {}", err);
warn!("SMTP job is invalid: {}", err);
SendResult::Failure(err)
}
Err(crate::smtp::send::Error::NoTransport) => {
// Should never happen.
// It does not even make sense to disconnect here.
error!(context, "SMTP job failed because SMTP has no transport");
error!("SMTP job failed because SMTP has no transport");
SendResult::Failure(format_err!("SMTP has not transport"))
}
Err(crate::smtp::send::Error::Other(err)) => {
// Local error, job is invalid, do not retry.
smtp.disconnect().await;
warn!(context, "unable to load job: {}", err);
warn!("unable to load job: {}", err);
SendResult::Failure(err)
}
Ok(()) => SendResult::Success,
@@ -551,8 +547,8 @@ pub(crate) async fn send_msg_to_smtp(
bail!("Number of retries exceeded the limit");
}
info!(
context,
"Try number {} to send message {} over SMTP", retries, msg_id
"Try number {} to send message {} over SMTP",
retries, msg_id
);
let recipients_list = recipients
@@ -561,7 +557,7 @@ pub(crate) async fn send_msg_to_smtp(
|addr| match async_smtp::EmailAddress::new(addr.to_string()) {
Ok(addr) => Some(addr),
Err(err) => {
warn!(context, "invalid recipient: {} {:?}", addr, err);
warn!("invalid recipient: {} {:?}", addr, err);
None
}
},
@@ -574,10 +570,7 @@ pub(crate) async fn send_msg_to_smtp(
.await
.with_context(|| format!("failed to check message {msg_id} existence"))?
{
info!(
context,
"Sending of message {} was cancelled by the user.", msg_id
);
info!("Sending of message {} was cancelled by the user.", msg_id);
return Ok(());
}
@@ -607,7 +600,7 @@ pub(crate) async fn send_msg_to_smtp(
async fn send_mdns(context: &Context, connection: &mut Smtp) -> Result<()> {
loop {
if !context.ratelimit.read().await.can_send() {
info!(context, "Ratelimiter does not allow sending MDNs now");
info!("Ratelimiter does not allow sending MDNs now");
return Ok(());
}
@@ -715,7 +708,7 @@ async fn send_mdn_msg_id(
match smtp_send(context, &recipients, &body, smtp, msg_id).await {
SendResult::Success => {
info!(context, "Successfully sent MDN for {}", msg_id);
info!("Successfully sent MDN for {}", msg_id);
context
.sql
.execute("DELETE FROM smtp_mdns WHERE msg_id = ?", paramsv![msg_id])
@@ -733,10 +726,7 @@ async fn send_mdn_msg_id(
Ok(())
}
SendResult::Retry => {
info!(
context,
"Temporary SMTP failure while sending an MDN for {}", msg_id
);
info!("Temporary SMTP failure while sending an MDN for {}", msg_id);
Ok(())
}
SendResult::Failure(err) => Err(err),
@@ -751,7 +741,7 @@ async fn send_mdn(context: &Context, smtp: &mut Smtp) -> Result<bool> {
context.sql.execute("DELETE FROM smtp_mdns", []).await?;
return Ok(false);
}
info!(context, "Sending MDNs");
info!("Sending MDNs");
context
.sql

View File

@@ -1,6 +1,7 @@
//! # SMTP message sending
use async_smtp::{EmailAddress, Envelope, SendableEmail};
use tracing::warn;
use super::Smtp;
use crate::config::Config;
@@ -70,8 +71,8 @@ impl Smtp {
self.last_success = Some(std::time::SystemTime::now());
} else {
warn!(
context,
"uh? SMTP has no transport, failed to send to {}", recipients_display
"uh? SMTP has no transport, failed to send to {}",
recipients_display
);
return Err(Error::NoTransport);
}

View File

@@ -22,6 +22,7 @@ use crate::param::{Param, Params};
use crate::peerstate::{deduplicate_peerstates, Peerstate};
use crate::stock_str;
use crate::tools::{delete_file, time};
use tracing::{error, info, warn};
#[allow(missing_docs)]
#[macro_export]
@@ -216,7 +217,7 @@ impl Sql {
// the structure is complete now and all objects are usable
if recalc_fingerprints {
info!(context, "[migration] recalc fingerprints");
info!("[migration] recalc fingerprints");
let addrs = self
.query_map(
"SELECT addr FROM acpeerstates;",
@@ -265,7 +266,7 @@ impl Sql {
.await?
}
Err(e) => {
warn!(context, "Migrations can't recode avatar, removing. {:#}", e);
warn!("Migrations can't recode avatar, removing. {:#}", e);
context.set_config(Config::Selfavatar, None).await?
}
}
@@ -280,8 +281,8 @@ impl Sql {
pub async fn open(&self, context: &Context, passphrase: String) -> Result<()> {
if self.is_open().await {
error!(
context,
"Cannot open, database \"{:?}\" already opened.", self.dbfile,
"Cannot open, database \"{:?}\" already opened.",
self.dbfile,
);
bail!("SQL database is already opened.");
}
@@ -291,7 +292,7 @@ impl Sql {
self.close().await;
Err(err)
} else {
info!(context, "Opened database {:?}.", self.dbfile);
info!("Opened database {:?}.", self.dbfile);
*self.is_encrypted.write().await = Some(passphrase_nonempty);
// setup debug logging if there is an entry containing its id
@@ -306,37 +307,49 @@ impl Sql {
}
}
/// Locks the write transactions mutex.
/// We do not make all transactions
/// [IMMEDIATE](https://www.sqlite.org/lang_transaction.html#deferred_immediate_and_exclusive_transactions)
/// for more parallelism -- at least read transactions can be made DEFERRED to run in parallel
/// w/o any drawbacks. But if we make write transactions DEFERRED also w/o any external locking,
/// then they are upgraded from read to write ones on the first write statement. This has some
/// drawbacks:
/// - If there are other write transactions, we block the thread and the db connection until
/// upgraded. Also if some reader comes then, it has to get next, less used connection with a
/// worse per-connection page cache.
/// - If a transaction is blocked for more than busy_timeout, it fails with SQLITE_BUSY.
/// - Configuring busy_timeout is not the best way to manage transaction timeouts, we would
/// prefer it to be integrated with Rust/tokio asyncs. Moreover, SQLite implements waiting
/// using sleeps.
/// - If upon a successful upgrade to a write transaction the db has been modified by another
/// one, the transaction has to be rolled back and retried. It is an extra work in terms of
/// Locks the write transactions mutex in order to make sure that there never are
/// multiple write transactions at once.
///
/// Doing the locking ourselves instead of relying on SQLite has these reasons:
///
/// - SQLite's locking mechanism is non-async, blocking a thread
/// - SQLite's locking mechanism just sleeps in a loop, which is really inefficient
///
/// ---
///
/// More considerations on alternatives to the current approach:
///
/// We use [DEFERRED](https://www.sqlite.org/lang_transaction.html#deferred_immediate_and_exclusive_transactions) transactions.
///
/// In order to never get concurrency issues, we could make all transactions IMMEDIATE,
/// but this would mean that there can never be two simultaneous transactions.
///
/// Read transactions can simply be made DEFERRED to run in parallel w/o any drawbacks.
///
/// DEFERRED write transactions without doing the locking ourselves would have these drawbacks:
///
/// 1. As mentioned above, SQLite's locking mechanism is non-async and sleeps in a loop.
/// 2. If there are other write transactions, we block the db connection until
/// upgraded. If some reader comes then, it has to get the next, less used connection with a
/// worse per-connection page cache (SQLite allows one write and any number of reads in parallel).
/// 3. If a transaction is blocked for more than `busy_timeout`, it fails with SQLITE_BUSY.
/// 4. If upon a successful upgrade to a write transaction the db has been modified,
/// the transaction has to be rolled back and retried, which means extra work in terms of
/// CPU/battery.
/// - Maybe minor, but we lose some fairness in servicing write transactions, i.e. we service
/// them in the order of the first write statement, not in the order they come.
/// The only pro of making write transactions DEFERRED w/o the external locking is some
/// parallelism between them. Also we have an option to make write transactions IMMEDIATE, also
/// w/o the external locking. But then the most of cons above are still valid. Instead, if we
/// perform all write transactions under an async mutex, the only cons is losing some
/// parallelism for write transactions.
///
/// The only pro of making write transactions DEFERRED w/o the external locking would be some
/// parallelism between them.
///
/// Another option would be to make write transactions IMMEDIATE, also
/// w/o the external locking. But then cons 1. - 3. above would still be valid.
pub async fn write_lock(&self) -> MutexGuard<'_, ()> {
self.write_mtx.lock().await
}
/// Allocates a connection and calls `function` with the connection. If `function` does write
/// queries, either a lock must be taken first using `write_lock()` or `call_write()` used
/// instead.
/// queries,
/// - either first take a lock using `write_lock()`
/// - or use `call_write()` instead.
///
/// Returns the result of the function.
async fn call<'a, F, R>(&'a self, function: F) -> Result<R>
@@ -685,28 +698,19 @@ fn new_connection(path: &Path, passphrase: &str) -> Result<Connection> {
/// Cleanup the account to restore some storage and optimize the database.
pub async fn housekeeping(context: &Context) -> Result<()> {
if let Err(err) = remove_unused_files(context).await {
warn!(
context,
"Housekeeping: cannot remove unused files: {:#}", err
);
warn!("Housekeeping: cannot remove unused files: {err:#}.");
}
if let Err(err) = start_ephemeral_timers(context).await {
warn!(
context,
"Housekeeping: cannot start ephemeral timers: {:#}", err
);
warn!("Housekeeping: cannot start ephemeral timers: {err:#}.");
}
if let Err(err) = prune_tombstones(&context.sql).await {
warn!(
context,
"Housekeeping: Cannot prune message tombstones: {:#}", err
);
warn!("Housekeeping: Cannot prune message tombstones: {err:#}.");
}
if let Err(err) = deduplicate_peerstates(&context.sql).await {
warn!(context, "Failed to deduplicate peerstates: {:#}", err)
warn!("Failed to deduplicate peerstates: {err:#}.")
}
context.schedule_quota_update().await?;
@@ -719,11 +723,11 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
.await
{
Err(err) => {
warn!(context, "Failed to run incremental vacuum: {err:#}");
warn!("Failed to run incremental vacuum: {err:#}.");
}
Ok(Some(())) => {
// Incremental vacuum returns a zero-column result if it did anything.
info!(context, "Successfully ran incremental vacuum.");
info!("Successfully ran incremental vacuum.");
}
Ok(None) => {
// Incremental vacuum returned `SQLITE_DONE` immediately,
@@ -735,7 +739,7 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
.set_config(Config::LastHousekeeping, Some(&time().to_string()))
.await
{
warn!(context, "Can't set config: {}", e);
warn!("Can't set config: {e:#}.");
}
context
@@ -747,7 +751,7 @@ pub async fn housekeeping(context: &Context) -> Result<()> {
.await
.ok_or_log_msg(context, "failed to remove old MDNs");
info!(context, "Housekeeping done.");
info!("Housekeeping done.");
Ok(())
}
@@ -756,7 +760,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
let mut files_in_use = HashSet::new();
let mut unreferenced_count = 0;
info!(context, "Start housekeeping...");
info!("Start housekeeping...");
maybe_add_from_param(
&context.sql,
&mut files_in_use,
@@ -802,7 +806,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
.await
.context("housekeeping: failed to SELECT value FROM config")?;
info!(context, "{} files in use.", files_in_use.len(),);
info!("{} files in use.", files_in_use.len());
/* go through directories and delete unused files */
let blobdir = context.get_blobdir();
for p in [&blobdir.join(BLOBS_BACKUP_NAME), blobdir] {
@@ -833,8 +837,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
// The dir could be created not by a user, but by a desktop
// environment f.e. So, no warning.
info!(
context,
"Housekeeping: Cannot rmdir {}: {:#}",
"Housekeeping: Cannot rmdir {}: {:#}.",
entry.path().display(),
e
);
@@ -855,8 +858,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
&& (recently_created || recently_modified || recently_accessed)
{
info!(
context,
"Housekeeping: Keeping new unreferenced file #{}: {:?}",
"Housekeeping: Keeping new unreferenced file #{}: {:?}.",
unreferenced_count,
entry.file_name(),
);
@@ -866,15 +868,13 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
unreferenced_count += 1;
}
info!(
context,
"Housekeeping: Deleting unreferenced file #{}: {:?}",
"Housekeeping: Deleting unreferenced file #{}: {:?}.",
unreferenced_count,
entry.file_name()
);
let path = entry.path();
if let Err(err) = delete_file(context, &path).await {
error!(
context,
"Failed to delete unused file {}: {:#}.",
path.display(),
err
@@ -883,12 +883,7 @@ pub async fn remove_unused_files(context: &Context) -> Result<()> {
}
}
Err(err) => {
warn!(
context,
"Housekeeping: Cannot read dir {}: {:#}",
p.display(),
err
);
warn!("Housekeeping: Cannot read dir {}: {:#}.", p.display(), err);
}
}
}
@@ -1150,7 +1145,6 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_migration_flags() -> Result<()> {
let t = TestContext::new().await;
t.evtracker.get_info_contains("Opened database").await;
// as migrations::run() was already executed on context creation,
// another call should not result in any action needed.
@@ -1162,7 +1156,9 @@ mod tests {
assert!(!disable_server_delete);
assert!(!recode_avatar);
info!(&t, "test_migration_flags: XXX END MARKER");
t.emit_event(crate::EventType::Info(
"test_migration_flags: XXX END MARKER".to_string(),
));
loop {
let evt = t

View File

@@ -1,6 +1,7 @@
//! Migrations module.
use anyhow::{Context as _, Result};
use tracing::{info, warn};
use crate::config::Config;
use crate::constants::ShowEmails;
@@ -372,7 +373,7 @@ UPDATE chats SET protected=1, type=120 WHERE type=130;"#,
)
.await?;
} else {
warn!(context, "Can't parse configured address: {:?}", addr);
warn!("Can't parse configured address: {:?}", addr);
}
}
@@ -691,20 +692,18 @@ CREATE INDEX smtp_messageid ON imap(rfc724_mid);
sql.set_db_version(98).await?;
}
if dbversion < 99 {
sql.execute_migration_parts(
[
"ALTER TABLE msgs DROP COLUMN server_folder;",
"ALTER TABLE msgs DROP COLUMN server_uid;",
"ALTER TABLE msgs DROP COLUMN move_state;",
"ALTER TABLE chats DROP COLUMN draft_timestamp;",
"ALTER TABLE chats DROP COLUMN draft_txt",
],
99,
)
.await?;
// sql.execute_migration(
// "ALTER TABLE msgs DROP COLUMN server_folder;
// ALTER TABLE msgs DROP COLUMN server_uid;
// ALTER TABLE msgs DROP COLUMN move_state;
// ALTER TABLE chats DROP COLUMN draft_timestamp;
// ALTER TABLE chats DROP COLUMN draft_txt",
// 99,
// )
// .await?;
// Reverted above, as it requires to load the whole DB in memory.
// sql.set_db_version(99).await?;
sql.set_db_version(99).await?;
}
let new_version = sql
@@ -717,10 +716,7 @@ CREATE INDEX smtp_messageid ON imap(rfc724_mid);
} else {
"Created new database; "
};
info!(
context,
"{}[migration] v{}-v{}", created_db, dbversion, new_version
);
info!("{}[migration] v{}-v{}", created_db, dbversion, new_version);
}
Ok((
@@ -758,37 +754,4 @@ impl Sql {
Ok(())
}
async fn execute_migration_parts(
&self,
queries: impl IntoIterator<Item = &'static str>,
version: i32,
) -> Result<()> {
for query in queries {
self.transaction(move |transaction| {
transaction.execute_batch(query)?;
Ok(())
})
.await
.with_context(|| format!("execute_migration failed for version {version}"))?;
}
self.transaction(move |transaction| {
// set raw config inside the transaction
transaction.execute(
"UPDATE config SET value=? WHERE keyname=?;",
paramsv![format!("{version}"), VERSION_CFG],
)?;
Ok(())
})
.await
.context("failed to set version")?;
let mut lock = self.config_cache.write().await;
lock.insert(VERSION_CFG.to_string(), Some(format!("{version}")));
drop(lock);
Ok(())
}
}

View File

@@ -404,6 +404,9 @@ pub enum StockMessage {
#[strum(props(fallback = "Chat protection disabled by %1$s."))]
ProtectionDisabledBy = 161,
#[strum(props(fallback = "Scan to set up second device for %1$s"))]
BackupTransferQr = 162,
}
impl StockMessage {
@@ -741,14 +744,14 @@ pub(crate) async fn setup_contact_qr_description(
display_name: &str,
addr: &str,
) -> String {
let name = &if display_name == addr {
let name = if display_name == addr {
addr.to_owned()
} else {
format!("{display_name} ({addr})")
};
translated(context, StockMessage::SetupContactQRDescription)
.await
.replace1(name)
.replace1(&name)
}
/// Stock string: `Scan to join %1$s`.
@@ -1240,6 +1243,24 @@ pub(crate) async fn aeap_explanation_and_link(
.replace2(new_addr)
}
/// Text to put in the [`Qr::Backup`] rendered SVG image.
///
/// The default is "Scan to set up second device for <account name (account addr)>". The
/// account name and address are looked up from the context.
///
/// [`Qr::Backup`]: crate::qr::Qr::Backup
pub(crate) async fn backup_transfer_qr(context: &Context) -> Result<String> {
let contact = Contact::get_by_id(context, ContactId::SELF).await?;
let addr = contact.get_addr();
let full_name = match context.get_config(Config::Displayname).await? {
Some(name) if name != addr => format!("{name} ({addr})"),
_ => addr.to_string(),
};
Ok(translated(context, StockMessage::BackupTransferQr)
.await
.replace1(&full_name))
}
impl Context {
/// Set the stock string for the [StockMessage].
///

View File

@@ -4,6 +4,7 @@ use anyhow::Result;
use lettre_email::mime::{self};
use lettre_email::PartBuilder;
use serde::{Deserialize, Serialize};
use tracing::{info, warn};
use crate::chat::{Chat, ChatId};
use crate::config::Config;
@@ -90,8 +91,8 @@ impl Context {
let chat = Chat::load_from_db(self, chat_id).await?;
if !chat.is_promoted() {
info!(
self,
"group '{}' not yet promoted, do not sync tokens yet.", chat.grpid
"group '{}' not yet promoted, do not sync tokens yet.",
chat.grpid
);
return Ok(());
}
@@ -109,8 +110,8 @@ impl Context {
Ok(())
}
// Add deleted qr-code token to the list of items to be synced
// so that the token also gets deleted on the other devices.
/// Adds deleted qr-code token to the list of items to be synced
/// so that the token also gets deleted on the other devices.
pub(crate) async fn sync_qr_code_token_deletion(
&self,
invitenumber: String,
@@ -226,7 +227,7 @@ impl Context {
/// If eg. just an item cannot be deleted,
/// that should not hold off the other items to be executed.
pub(crate) async fn execute_sync_items(&self, items: &SyncItems) -> Result<()> {
info!(self, "executing {} sync item(s)", items.items.len());
info!("executing {} sync item(s)", items.items.len());
for item in &items.items {
match &item.data {
AddQrToken(token) => {
@@ -236,10 +237,7 @@ impl Context {
{
Some(chat_id)
} else {
warn!(
self,
"Ignoring token for nonexistent/deleted group '{}'.", grpid
);
warn!("Ignoring token for nonexistent/deleted group '{}'.", grpid);
continue;
}
} else {

Some files were not shown because too many files have changed in this diff Show More