Compare commits

..

7 Commits

Author SHA1 Message Date
link2xt
f24843fbb1 Release 1.112.4 2023-03-31 01:12:26 +00:00
link2xt
6c57bc9438 Fix call to auditwheel in scripts/run_all.sh
This bug prevents CI from building wheels.
2023-03-31 01:02:41 +00:00
link2xt
e612927c5d Fix links in the changelog 2023-03-30 20:36:30 +00:00
link2xt
aff951440c Release 1.112.3 2023-03-30 20:32:49 +00:00
Floris Bruynooghe
ef63e01632 fix(imex): transfer::get_backup must always free ongoing process (#4249)
* fix(imex): transfer::get_backup must always free ongoing process

When the ongoing process is cancelled it is still the responsibility
of whoever took out the ongoing process to free it.  This code was
only freeing the ongoing process when completed normally but not when
cancelled.

* add changelog
2023-03-30 20:23:50 +00:00
link2xt
d6fdc7cb67 Release 1.112.2 2023-03-30 14:57:32 +00:00
link2xt
ea87c78d34 Do not return media from trashed messages in the "All media" view. 2023-03-30 08:10:06 +00:00
13 changed files with 61 additions and 46 deletions

View File

@@ -1,12 +1,24 @@
# Changelog
## 1.112.2 - unreleased
## [1.112.4] - 2023-03-31
### Fixes
- Fix call to `auditwheel` in `scripts/run_all.sh`.
## [1.112.3] - 2023-03-30
### Fixes
- `transfer::get_backup` now frees ongoing process when cancelled. #4249
## [1.112.2] - 2023-03-30
### Changes
- Update iroh, remove `default-net` from `[patch.crates-io]` section.
- transfer backup: Connect to multiple provider addresses concurrently. This should speed up connection time significantly on the getter side. #4240
- Make sure BackupProvider is cancelled on drop (or dc_backup_provider_unref). The BackupProvider will now always finish with an IMEX event of 1000 or 0, previously it would sometimes finish with 1000 (success) when it really was 0 (failure). #4242
- Fix crash when dc_backup_provider_t is unrefed while dc_backup_provider_wait() is still using it. #4244
### Fixes
- Do not return media from trashed messages in the "All media" view. #4247
## [1.112.1] - 2023-03-27
@@ -2341,3 +2353,6 @@ https://github.com/deltachat/deltachat-core-rust/pulls?q=is%3Apr+is%3Aclosed
[1.111.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.110.0...v1.111.0
[1.112.0]: https://github.com/deltachat/deltachat-core-rust/compare/v1.111.0...v1.112.0
[1.112.1]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.0...v1.112.1
[1.112.2]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.1...v1.112.2
[1.112.3]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.2...v1.112.3
[1.112.4]: https://github.com/deltachat/deltachat-core-rust/compare/v1.112.3...v1.112.4

10
Cargo.lock generated
View File

@@ -1061,7 +1061,7 @@ dependencies = [
[[package]]
name = "deltachat"
version = "1.112.1"
version = "1.112.4"
dependencies = [
"ansi_term",
"anyhow",
@@ -1135,7 +1135,7 @@ dependencies = [
[[package]]
name = "deltachat-jsonrpc"
version = "1.112.1"
version = "1.112.4"
dependencies = [
"anyhow",
"async-channel",
@@ -1158,7 +1158,7 @@ dependencies = [
[[package]]
name = "deltachat-repl"
version = "1.112.1"
version = "1.112.4"
dependencies = [
"ansi_term",
"anyhow",
@@ -1173,7 +1173,7 @@ dependencies = [
[[package]]
name = "deltachat-rpc-server"
version = "1.112.1"
version = "1.112.4"
dependencies = [
"anyhow",
"deltachat",
@@ -1197,7 +1197,7 @@ dependencies = [
[[package]]
name = "deltachat_ffi"
version = "1.112.1"
version = "1.112.4"
dependencies = [
"anyhow",
"deltachat",

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat"
version = "1.112.1"
version = "1.112.4"
edition = "2021"
license = "MPL-2.0"
rust-version = "1.64"

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat_ffi"
version = "1.112.1"
version = "1.112.4"
description = "Deltachat FFI"
edition = "2018"
readme = "README.md"

View File

@@ -4221,17 +4221,10 @@ pub unsafe extern "C" fn dc_backup_provider_wait(provider: *mut dc_backup_provid
let ffi_provider = &mut *provider;
let ctx = &*ffi_provider.context;
let provider = &mut ffi_provider.provider;
backup_provider_wait(ctx.clone(), provider.clone());
}
// Because this is a long-running operation make sure we own the Context and BackupProvider.
// This stops a FFI user from deallocating it by calling unref on the object while we are
// using it.
fn backup_provider_wait(context: Context, provider: BackupProvider) {
block_on(provider)
.log_err(&context, "Failed to await BackupProvider")
.log_err(ctx, "Failed to await BackupProvider")
.context("Failed to await BackupProvider")
.set_last_error(&context)
.set_last_error(ctx)
.ok();
}

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-jsonrpc"
version = "1.112.1"
version = "1.112.4"
description = "DeltaChat JSON-RPC API"
edition = "2021"
default-run = "deltachat-jsonrpc-server"

View File

@@ -55,5 +55,5 @@
},
"type": "module",
"types": "dist/deltachat.d.ts",
"version": "1.112.1"
"version": "1.112.4"
}

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-repl"
version = "1.112.1"
version = "1.112.4"
license = "MPL-2.0"
edition = "2021"

View File

@@ -1,6 +1,6 @@
[package]
name = "deltachat-rpc-server"
version = "1.112.1"
version = "1.112.4"
description = "DeltaChat JSON-RPC server"
edition = "2021"
readme = "README.md"

View File

@@ -60,5 +60,5 @@
"test:mocha": "mocha -r esm node/test/test.js --growl --reporter=spec --bail --exit"
},
"types": "node/dist/index.d.ts",
"version": "1.112.1"
}
"version": "1.112.4"
}

View File

@@ -33,7 +33,7 @@ unset DCC_NEW_TMP_EMAIL
# E.g. musllinux_1_1 does not have PyPy interpreters as of 2022-07-10
tox --workdir "$TOXWORKDIR" -e py37,py38,py39,py310,py311,pypy37,pypy38,pypy39 --skip-missing-interpreters true
auditwheel repair "$TOXWORKDIR/wheelhouse/deltachat*" -w "$TOXWORKDIR/wheelhouse"
auditwheel repair "$TOXWORKDIR"/wheelhouse/deltachat* -w "$TOXWORKDIR/wheelhouse"
echo -----------------------

View File

@@ -2727,12 +2727,14 @@ pub async fn get_chat_media(
"SELECT id
FROM msgs
WHERE (1=? OR chat_id=?)
AND chat_id != ?
AND (type=? OR type=? OR type=?)
AND hidden=0
ORDER BY timestamp, id;",
paramsv![
chat_id.is_none(),
chat_id.unwrap_or_else(|| ChatId::new(0)),
DC_CHAT_ID_TRASH,
msg_type,
if msg_type2 != Viewtype::Unknown {
msg_type2
@@ -3795,6 +3797,7 @@ mod tests {
use crate::chatlist::{get_archived_cnt, Chatlist};
use crate::constants::{DC_GCL_ARCHIVED_ONLY, DC_GCL_NO_SPECIALS};
use crate::contact::{Contact, ContactAddress};
use crate::message::delete_msgs;
use crate::receive_imf::receive_imf;
use crate::test_utils::TestContext;
@@ -5977,7 +5980,7 @@ mod tests {
include_bytes!("../test-data/image/avatar64x64.png"),
)
.await?;
send_media(
let second_image_msg_id = send_media(
&t,
chat_id2,
Viewtype::Image,
@@ -6079,6 +6082,21 @@ mod tests {
4
);
// Delete an image.
delete_msgs(&t, &[second_image_msg_id]).await?;
assert_eq!(
get_chat_media(
&t,
None,
Viewtype::Image,
Viewtype::Sticker,
Viewtype::Webxdc,
)
.await?
.len(),
3
);
Ok(())
}
}

View File

@@ -27,13 +27,10 @@ use std::net::Ipv4Addr;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use std::task::Poll;
use anyhow::{anyhow, bail, ensure, format_err, Context as _, Result};
use async_channel::Receiver;
use futures::future::{BoxFuture, Shared};
use futures::{FutureExt, TryFutureExt};
use futures_lite::StreamExt;
use iroh::blobs::Collection;
use iroh::get::DataStream;
@@ -45,7 +42,7 @@ use tokio::fs::{self, File};
use tokio::io::{self, AsyncWriteExt, BufWriter};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::{broadcast, Mutex};
use tokio::task::JoinSet;
use tokio::task::{JoinHandle, JoinSet};
use tokio_stream::wrappers::ReadDirStream;
use tokio_util::sync::CancellationToken;
@@ -72,14 +69,14 @@ const MAX_CONCURRENT_DIALS: u8 = 16;
///
/// The task implements [`Future`] and awaiting it will complete once a transfer has been
/// either completed or aborted.
#[derive(Debug, Clone)]
#[derive(Debug)]
pub struct BackupProvider {
/// The supervisor task, run by [`BackupProvider::watch_provider`].
handle: Shared<BoxFuture<'static, Result<(), String>>>,
handle: JoinHandle<Result<()>>,
/// The ticket to retrieve the backup collection.
ticket: Ticket,
/// Guard to cancel the provider on drop.
_drop_guard: Arc<tokio_util::sync::DropGuard>,
_drop_guard: tokio_util::sync::DropGuard,
}
impl BackupProvider {
@@ -142,17 +139,13 @@ impl BackupProvider {
// Explicit drop to move the guards into this future
drop(paused_guard);
drop(dbfile);
res.map_err(|err| format!("{err:#}"))
res
})
.map_err(|err| format!("{err}"))
.and_then(futures::future::ready)
.boxed()
.shared()
};
Ok(Self {
handle,
ticket,
_drop_guard: Arc::new(drop_token.drop_guard()),
_drop_guard: drop_token.drop_guard(),
})
}
@@ -300,9 +293,7 @@ impl Future for BackupProvider {
type Output = Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.handle)
.poll(cx)
.map_err(anyhow::Error::msg)
Pin::new(&mut self.handle).poll(cx)?
}
}
@@ -401,12 +392,10 @@ pub async fn get_backup(context: &Context, qr: Qr) -> Result<()> {
let cancel_token = context.alloc_ongoing().await?;
let res = tokio::select! {
biased;
res = get_backup_inner(context, qr) => {
context.free_ongoing().await;
res
}
res = get_backup_inner(context, qr) => res,
_ = cancel_token.recv() => Err(format_err!("cancelled")),
};
context.free_ongoing().await;
res
}