imex: use param2 for passphrase

This commit is contained in:
link2xt
2022-01-16 13:22:08 +00:00
parent 9b562eebcd
commit a1f04d2129
10 changed files with 212 additions and 135 deletions

View File

@@ -17,6 +17,8 @@
- allow to remove quotes on drafts `dc_msg_set_quote(msg, NULL)` #2950 - allow to remove quotes on drafts `dc_msg_set_quote(msg, NULL)` #2950
- Use second parameter of `dc_imex` to provide backup passphrase #2980
#### Removed #### Removed
- Removed `mvbox_watch` option. #2906 - Removed `mvbox_watch` option. #2906

6
Cargo.lock generated
View File

@@ -2105,8 +2105,7 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
[[package]] [[package]]
name = "libsqlite3-sys" name = "libsqlite3-sys"
version = "0.23.2" version = "0.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/rusqlite/rusqlite?branch=master#ddb7141c6dee4b8956af85b2e4a01a28e5fdbacc"
checksum = "d2cafc7c74096c336d9d27145f7ebd4f4b6f95ba16aa5a282387267e6925cb58"
dependencies = [ dependencies = [
"cc", "cc",
"openssl-sys", "openssl-sys",
@@ -3079,8 +3078,7 @@ dependencies = [
[[package]] [[package]]
name = "rusqlite" name = "rusqlite"
version = "0.26.3" version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/rusqlite/rusqlite?branch=master#ddb7141c6dee4b8956af85b2e4a01a28e5fdbacc"
checksum = "4ba4d3462c8b2e4d7f4fcfcf2b296dc6b65404fbbc7b63daa37fd485c149daf7"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"fallible-iterator", "fallible-iterator",

View File

@@ -12,6 +12,9 @@ debug = 0
[profile.release] [profile.release]
lto = true lto = true
[patch.crates-io]
rusqlite = { git = "https://github.com/rusqlite/rusqlite", branch="master" }
[dependencies] [dependencies]
deltachat_derive = { path = "./deltachat_derive" } deltachat_derive = { path = "./deltachat_derive" }

View File

@@ -2023,8 +2023,8 @@ dc_contact_t* dc_get_contact (dc_context_t* context, uint32_t co
#define DC_IMEX_EXPORT_SELF_KEYS 1 // param1 is a directory where the keys are written to #define DC_IMEX_EXPORT_SELF_KEYS 1 // param1 is a directory where the keys are written to
#define DC_IMEX_IMPORT_SELF_KEYS 2 // param1 is a directory where the keys are searched in and read from #define DC_IMEX_IMPORT_SELF_KEYS 2 // param1 is a directory where the keys are searched in and read from
#define DC_IMEX_EXPORT_BACKUP 11 // param1 is a directory where the backup is written to #define DC_IMEX_EXPORT_BACKUP 11 // param1 is a directory where the backup is written to, param2 is a passphrase to encrypt the backup
#define DC_IMEX_IMPORT_BACKUP 12 // param1 is the file with the backup to import #define DC_IMEX_IMPORT_BACKUP 12 // param1 is the file with the backup to import, param2 is the backup's passphrase
/** /**
@@ -2033,14 +2033,16 @@ dc_contact_t* dc_get_contact (dc_context_t* context, uint32_t co
* if needed stop IO using dc_accounts_stop_io() or dc_stop_io() first. * if needed stop IO using dc_accounts_stop_io() or dc_stop_io() first.
* What to do is defined by the _what_ parameter which may be one of the following: * What to do is defined by the _what_ parameter which may be one of the following:
* *
* - **DC_IMEX_EXPORT_BACKUP** (11) - Export a backup to the directory given as `param1`. * - **DC_IMEX_EXPORT_BACKUP** (11) - Export a backup to the directory given as `param1`
* encrypted with the passphrase given as `param2`. If `param2` is NULL or empty string,
* the backup is not encrypted.
* The backup contains all contacts, chats, images and other data and device independent settings. * The backup contains all contacts, chats, images and other data and device independent settings.
* The backup does not contain device dependent settings as ringtones or LED notification settings. * The backup does not contain device dependent settings as ringtones or LED notification settings.
* The name of the backup is typically `delta-chat-<day>.tar`, if more than one backup is created on a day, * The name of the backup is typically `delta-chat-<day>.tar`, if more than one backup is created on a day,
* the format is `delta-chat-<day>-<number>.tar` * the format is `delta-chat-<day>-<number>.tar`
* *
* - **DC_IMEX_IMPORT_BACKUP** (12) - `param1` is the file (not: directory) to import. The file is normally * - **DC_IMEX_IMPORT_BACKUP** (12) - `param1` is the file (not: directory) to import. `param2` is the passphrase.
* created by DC_IMEX_EXPORT_BACKUP and detected by dc_imex_has_backup(). Importing a backup * The file is normally created by DC_IMEX_EXPORT_BACKUP and detected by dc_imex_has_backup(). Importing a backup
* is only possible as long as the context is not configured or used in another way. * is only possible as long as the context is not configured or used in another way.
* *
* - **DC_IMEX_EXPORT_SELF_KEYS** (1) - Export all private keys and all public keys of the user to the * - **DC_IMEX_EXPORT_SELF_KEYS** (1) - Export all private keys and all public keys of the user to the

View File

@@ -2022,7 +2022,7 @@ pub unsafe extern "C" fn dc_imex(
context: *mut dc_context_t, context: *mut dc_context_t,
what_raw: libc::c_int, what_raw: libc::c_int,
param1: *const libc::c_char, param1: *const libc::c_char,
_param2: *const libc::c_char, param2: *const libc::c_char,
) { ) {
if context.is_null() { if context.is_null() {
eprintln!("ignoring careless call to dc_imex()"); eprintln!("ignoring careless call to dc_imex()");
@@ -2035,12 +2035,13 @@ pub unsafe extern "C" fn dc_imex(
return; return;
} }
}; };
let passphrase = to_opt_string_lossy(param2);
let ctx = &*context; let ctx = &*context;
if let Some(param1) = to_opt_string_lossy(param1) { if let Some(param1) = to_opt_string_lossy(param1) {
spawn(async move { spawn(async move {
imex::imex(ctx, what, param1.as_ref()) imex::imex(ctx, what, param1.as_ref(), passphrase)
.await .await
.log_err(ctx, "IMEX failed") .log_err(ctx, "IMEX failed")
}); });

View File

@@ -472,20 +472,32 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
} }
"export-backup" => { "export-backup" => {
let dir = dirs::home_dir().unwrap_or_default(); let dir = dirs::home_dir().unwrap_or_default();
imex(&context, ImexMode::ExportBackup, dir.as_ref()).await?; imex(
&context,
ImexMode::ExportBackup,
dir.as_ref(),
Some(arg2.to_string()),
)
.await?;
println!("Exported to {}.", dir.to_string_lossy()); println!("Exported to {}.", dir.to_string_lossy());
} }
"import-backup" => { "import-backup" => {
ensure!(!arg1.is_empty(), "Argument <backup-file> missing."); ensure!(!arg1.is_empty(), "Argument <backup-file> missing.");
imex(&context, ImexMode::ImportBackup, arg1.as_ref()).await?; imex(
&context,
ImexMode::ImportBackup,
arg1.as_ref(),
Some(arg2.to_string()),
)
.await?;
} }
"export-keys" => { "export-keys" => {
let dir = dirs::home_dir().unwrap_or_default(); let dir = dirs::home_dir().unwrap_or_default();
imex(&context, ImexMode::ExportSelfKeys, dir.as_ref()).await?; imex(&context, ImexMode::ExportSelfKeys, dir.as_ref(), None).await?;
println!("Exported to {}.", dir.to_string_lossy()); println!("Exported to {}.", dir.to_string_lossy());
} }
"import-keys" => { "import-keys" => {
imex(&context, ImexMode::ImportSelfKeys, arg1.as_ref()).await?; imex(&context, ImexMode::ImportSelfKeys, arg1.as_ref(), None).await?;
} }
"export-setup" => { "export-setup" => {
let setup_code = create_setup_code(&context); let setup_code = create_setup_code(&context);

View File

@@ -109,8 +109,8 @@ impl Context {
let context = Self::new_closed(dbfile, id).await?; let context = Self::new_closed(dbfile, id).await?;
// Open the database if is not encrypted. // Open the database if is not encrypted.
if context.set_passphrase("".to_string()).await? { if context.check_passphrase("".to_string()).await? {
context.sql.open(&context).await?; context.sql.open(&context, "".to_string()).await?;
} }
Ok(context) Ok(context)
} }
@@ -133,8 +133,8 @@ impl Context {
/// Returns true if passphrase is correct, false if passphrase is not correct. Fails on other /// Returns true if passphrase is correct, false if passphrase is not correct. Fails on other
/// errors. /// errors.
pub async fn open(&self, passphrase: String) -> Result<bool> { pub async fn open(&self, passphrase: String) -> Result<bool> {
if self.sql.set_passphrase(passphrase).await? { if self.sql.check_passphrase(passphrase.clone()).await? {
self.sql.open(self).await?; self.sql.open(self, passphrase).await?;
Ok(true) Ok(true)
} else { } else {
Ok(false) Ok(false)
@@ -146,13 +146,13 @@ impl Context {
self.sql.is_open().await self.sql.is_open().await
} }
/// Sets the database passphrase. /// Tests the database passphrase.
/// ///
/// Returns true if passphrase is correct. /// Returns true if passphrase is correct.
/// ///
/// Fails if database is already open. /// Fails if database is already open.
pub async fn set_passphrase(&self, passphrase: String) -> Result<bool> { pub(crate) async fn check_passphrase(&self, passphrase: String) -> Result<bool> {
self.sql.set_passphrase(passphrase).await self.sql.check_passphrase(passphrase).await
} }
pub(crate) async fn with_blobdir( pub(crate) async fn with_blobdir(
@@ -1039,7 +1039,7 @@ mod tests {
} }
#[async_std::test] #[async_std::test]
async fn test_set_passphrase() -> Result<()> { async fn test_check_passphrase() -> Result<()> {
let dir = tempdir()?; let dir = tempdir()?;
let dbfile = dir.path().join("db.sqlite"); let dbfile = dir.path().join("db.sqlite");
@@ -1056,7 +1056,7 @@ mod tests {
.await .await
.context("failed to create context")?; .context("failed to create context")?;
assert_eq!(context.is_open().await, false); assert_eq!(context.is_open().await, false);
assert_eq!(context.set_passphrase("bar".to_string()).await?, false); assert_eq!(context.check_passphrase("bar".to_string()).await?, false);
assert_eq!(context.open("false".to_string()).await?, false); assert_eq!(context.open("false".to_string()).await?, false);
assert_eq!(context.open("foo".to_string()).await?, true); assert_eq!(context.open("foo".to_string()).await?, true);

View File

@@ -13,7 +13,7 @@ use async_std::path::{Path, PathBuf};
use async_std::prelude::*; use async_std::prelude::*;
use async_std::{fs, io}; use async_std::{fs, io};
use anyhow::{bail, Error}; use anyhow::Error;
use chrono::{Local, TimeZone}; use chrono::{Local, TimeZone};
use mailparse::dateparse; use mailparse::dateparse;
use mailparse::headers::Headers; use mailparse::headers::Headers;
@@ -451,33 +451,6 @@ pub fn dc_open_file_std<P: AsRef<std::path::Path>>(
} }
} }
/// Returns Ok((temp_path, dest_path)) on success. The backup can then be written to temp_path. If the backup succeeded,
/// it can be renamed to dest_path. This guarantees that the backup is complete.
pub(crate) async fn get_next_backup_path(
folder: impl AsRef<Path>,
backup_time: i64,
) -> Result<(PathBuf, PathBuf), Error> {
let folder = PathBuf::from(folder.as_ref());
let stem = chrono::NaiveDateTime::from_timestamp(backup_time, 0)
// Don't change this file name format, in has_backup() we use string comparison to determine which backup is newer:
.format("delta-chat-backup-%Y-%m-%d")
.to_string();
// 64 backup files per day should be enough for everyone
for i in 0..64 {
let mut tempfile = folder.clone();
tempfile.push(format!("{}-{:02}.tar.part", stem, i));
let mut destfile = folder.clone();
destfile.push(format!("{}-{:02}.tar", stem, i));
if !tempfile.exists().await && !destfile.exists().await {
return Ok((tempfile, destfile));
}
}
bail!("could not create backup file, disk full?");
}
pub(crate) fn time() -> i64 { pub(crate) fn time() -> i64 {
SystemTime::now() SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH) .duration_since(SystemTime::UNIX_EPOCH)

View File

@@ -20,7 +20,7 @@ use crate::constants::{Viewtype, DC_CONTACT_ID_SELF};
use crate::context::Context; use crate::context::Context;
use crate::dc_tools::{ use crate::dc_tools::{
dc_create_folder, dc_delete_file, dc_delete_files_in_dir, dc_get_filesuffix_lc, dc_create_folder, dc_delete_file, dc_delete_files_in_dir, dc_get_filesuffix_lc,
dc_open_file_std, dc_read_file, dc_write_file, get_next_backup_path, time, EmailAddress, dc_open_file_std, dc_read_file, dc_write_file, time, EmailAddress,
}; };
use crate::e2ee; use crate::e2ee;
use crate::events::EventType; use crate::events::EventType;
@@ -41,24 +41,24 @@ const BLOBS_BACKUP_NAME: &str = "blobs_backup";
#[repr(u32)] #[repr(u32)]
pub enum ImexMode { pub enum ImexMode {
/// Export all private keys and all public keys of the user to the /// Export all private keys and all public keys of the user to the
/// directory given as `param1`. The default key is written to the files `public-key-default.asc` /// directory given as `path`. The default key is written to the files `public-key-default.asc`
/// and `private-key-default.asc`, if there are more keys, they are written to files as /// and `private-key-default.asc`, if there are more keys, they are written to files as
/// `public-key-<id>.asc` and `private-key-<id>.asc` /// `public-key-<id>.asc` and `private-key-<id>.asc`
ExportSelfKeys = 1, ExportSelfKeys = 1,
/// Import private keys found in the directory given as `param1`. /// Import private keys found in the directory given as `path`.
/// The last imported key is made the default keys unless its name contains the string `legacy`. /// The last imported key is made the default keys unless its name contains the string `legacy`.
/// Public keys are not imported. /// Public keys are not imported.
ImportSelfKeys = 2, ImportSelfKeys = 2,
/// Export a backup to the directory given as `param1`. /// Export a backup to the directory given as `path` with the given `passphrase`.
/// The backup contains all contacts, chats, images and other data and device independent settings. /// The backup contains all contacts, chats, images and other data and device independent settings.
/// The backup does not contain device dependent settings as ringtones or LED notification settings. /// The backup does not contain device dependent settings as ringtones or LED notification settings.
/// The name of the backup is typically `delta-chat-<day>.tar`, if more than one backup is created on a day, /// The name of the backup is typically `delta-chat-<day>.tar`, if more than one backup is created on a day,
/// the format is `delta-chat-<day>-<number>.tar` /// the format is `delta-chat-<day>-<number>.tar`
ExportBackup = 11, ExportBackup = 11,
/// `param1` is the file (not: directory) to import. The file is normally /// `path` is the file (not: directory) to import. The file is normally
/// created by DC_IMEX_EXPORT_BACKUP and detected by dc_imex_has_backup(). Importing a backup /// created by DC_IMEX_EXPORT_BACKUP and detected by dc_imex_has_backup(). Importing a backup
/// is only possible as long as the context is not configured or used in another way. /// is only possible as long as the context is not configured or used in another way.
ImportBackup = 12, ImportBackup = 12,
@@ -78,11 +78,16 @@ pub enum ImexMode {
/// ///
/// Only one import-/export-progress can run at the same time. /// Only one import-/export-progress can run at the same time.
/// To cancel an import-/export-progress, drop the future returned by this function. /// To cancel an import-/export-progress, drop the future returned by this function.
pub async fn imex(context: &Context, what: ImexMode, param1: &Path) -> Result<()> { pub async fn imex(
context: &Context,
what: ImexMode,
path: &Path,
passphrase: Option<String>,
) -> Result<()> {
let cancel = context.alloc_ongoing().await?; let cancel = context.alloc_ongoing().await?;
let res = async { let res = async {
let success = imex_inner(context, what, param1).await; let success = imex_inner(context, what, path, passphrase).await;
match success { match success {
Ok(()) => { Ok(()) => {
info!(context, "IMEX successfully completed"); info!(context, "IMEX successfully completed");
@@ -115,11 +120,6 @@ async fn cleanup_aborted_imex(context: &Context, what: ImexMode) {
dc_delete_file(context, context.get_dbfile()).await; dc_delete_file(context, context.get_dbfile()).await;
dc_delete_files_in_dir(context, context.get_blobdir()).await; dc_delete_files_in_dir(context, context.get_blobdir()).await;
} }
if what == ImexMode::ExportBackup || what == ImexMode::ImportBackup {
if let Err(e) = context.sql.open(context).await {
warn!(context, "Re-opening db after imex failed: {}", e);
}
}
} }
/// Returns the filename of the backup found (otherwise an error) /// Returns the filename of the backup found (otherwise an error)
@@ -396,7 +396,12 @@ fn normalize_setup_code(s: &str) -> String {
out out
} }
async fn imex_inner(context: &Context, what: ImexMode, path: &Path) -> Result<()> { async fn imex_inner(
context: &Context,
what: ImexMode,
path: &Path,
passphrase: Option<String>,
) -> Result<()> {
info!(context, "Import/export dir: {}", path.display()); info!(context, "Import/export dir: {}", path.display());
ensure!(context.sql.is_open().await, "Database not opened."); ensure!(context.sql.is_open().await, "Database not opened.");
context.emit_event(EventType::ImexProgress(10)); context.emit_event(EventType::ImexProgress(10));
@@ -414,13 +419,26 @@ async fn imex_inner(context: &Context, what: ImexMode, path: &Path) -> Result<()
ImexMode::ExportSelfKeys => export_self_keys(context, path).await, ImexMode::ExportSelfKeys => export_self_keys(context, path).await,
ImexMode::ImportSelfKeys => import_self_keys(context, path).await, ImexMode::ImportSelfKeys => import_self_keys(context, path).await,
ImexMode::ExportBackup => export_backup(context, path).await, ImexMode::ExportBackup => {
ImexMode::ImportBackup => import_backup(context, path).await, export_backup(context, path, passphrase.unwrap_or_default()).await
}
ImexMode::ImportBackup => {
import_backup(context, path, passphrase.unwrap_or_default()).await
}
} }
} }
/// Import Backup /// Imports backup into the currently open database.
async fn import_backup(context: &Context, backup_to_import: &Path) -> Result<()> { ///
/// The contents of the currently open database will be lost.
///
/// `passphrase` is the passphrase used to open backup database. If backup is unencrypted, pass
/// empty string here.
async fn import_backup(
context: &Context,
backup_to_import: &Path,
passphrase: String,
) -> Result<()> {
info!( info!(
context, context,
"Import \"{}\" to \"{}\".", "Import \"{}\" to \"{}\".",
@@ -436,12 +454,6 @@ async fn import_backup(context: &Context, backup_to_import: &Path) -> Result<()>
!context.scheduler.read().await.is_running(), !context.scheduler.read().await.is_running(),
"cannot import backup, IO already running" "cannot import backup, IO already running"
); );
context.sql.close().await;
dc_delete_file(context, context.get_dbfile()).await;
ensure!(
!context.get_dbfile().exists().await,
"Cannot delete old database."
);
let backup_file = File::open(backup_to_import).await?; let backup_file = File::open(backup_to_import).await?;
let file_size = backup_file.metadata().await?.len(); let file_size = backup_file.metadata().await?.len();
@@ -463,11 +475,15 @@ async fn import_backup(context: &Context, backup_to_import: &Path) -> Result<()>
if f.path()?.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) { if f.path()?.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
// async_tar can't unpack to a specified file name, so we just unpack to the blobdir and then move the unpacked file. // async_tar can't unpack to a specified file name, so we just unpack to the blobdir and then move the unpacked file.
f.unpack_in(context.get_blobdir()).await?; f.unpack_in(context.get_blobdir()).await?;
fs::rename( let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
context.get_blobdir().join(DBFILE_BACKUP_NAME), context
context.get_dbfile(), .sql
) .import(&unpacked_database, passphrase.clone())
.await?; .await
.context("cannot import unpacked database")?;
fs::remove_file(unpacked_database)
.await
.context("cannot remove unpacked database")?;
} else { } else {
// async_tar will unpack to blobdir/BLOBS_BACKUP_NAME, so we move the file afterwards. // async_tar will unpack to blobdir/BLOBS_BACKUP_NAME, so we move the file afterwards.
f.unpack_in(context.get_blobdir()).await?; f.unpack_in(context.get_blobdir()).await?;
@@ -482,12 +498,6 @@ async fn import_backup(context: &Context, backup_to_import: &Path) -> Result<()>
} }
} }
context
.sql
.open(context)
.await
.context("Could not re-open db")?;
delete_and_reset_all_device_msgs(context).await?; delete_and_reset_all_device_msgs(context).await?;
Ok(()) Ok(())
@@ -496,12 +506,44 @@ async fn import_backup(context: &Context, backup_to_import: &Path) -> Result<()>
/******************************************************************************* /*******************************************************************************
* Export backup * Export backup
******************************************************************************/ ******************************************************************************/
#[allow(unused)]
async fn export_backup(context: &Context, dir: &Path) -> Result<()> { /// Returns Ok((temp_db_path, temp_path, dest_path)) on success. Unencrypted database can be
/// written to temp_db_path. The backup can then be written to temp_path. If the backup succeeded,
/// it can be renamed to dest_path. This guarantees that the backup is complete.
async fn get_next_backup_path(
folder: &Path,
backup_time: i64,
) -> Result<(PathBuf, PathBuf, PathBuf)> {
let folder = PathBuf::from(folder);
let stem = chrono::NaiveDateTime::from_timestamp(backup_time, 0)
// Don't change this file name format, in has_backup() we use string comparison to determine which backup is newer:
.format("delta-chat-backup-%Y-%m-%d")
.to_string();
// 64 backup files per day should be enough for everyone
for i in 0..64 {
let mut tempdbfile = folder.clone();
tempdbfile.push(format!("{}-{:02}.db", stem, i));
let mut tempfile = folder.clone();
tempfile.push(format!("{}-{:02}.tar.part", stem, i));
let mut destfile = folder.clone();
destfile.push(format!("{}-{:02}.tar", stem, i));
if !tempdbfile.exists().await && !tempfile.exists().await && !destfile.exists().await {
return Ok((tempdbfile, tempfile, destfile));
}
}
bail!("could not create backup file, disk full?");
}
async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
// get a fine backup file name (the name includes the date so that multiple backup instances are possible) // get a fine backup file name (the name includes the date so that multiple backup instances are possible)
let now = time(); let now = time();
let (temp_path, dest_path) = get_next_backup_path(dir, now).await?; let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, now).await?;
let _d = DeleteOnDrop(temp_path.clone()); let _d1 = DeleteOnDrop(temp_db_path.clone());
let _d2 = DeleteOnDrop(temp_path.clone());
context context
.sql .sql
@@ -513,16 +555,14 @@ async fn export_backup(context: &Context, dir: &Path) -> Result<()> {
.sql .sql
.execute("VACUUM;", paramsv![]) .execute("VACUUM;", paramsv![])
.await .await
.map_err(|e| warn!(context, "Vacuum failed, exporting anyway {}", e)); .map_err(|e| warn!(context, "Vacuum failed, exporting anyway {}", e))
.ok();
ensure!( ensure!(
!context.scheduler.read().await.is_running(), !context.scheduler.read().await.is_running(),
"cannot export backup, IO already running" "cannot export backup, IO already running"
); );
// we close the database during the export
context.sql.close().await;
info!( info!(
context, context,
"Backup '{}' to '{}'.", "Backup '{}' to '{}'.",
@@ -530,10 +570,13 @@ async fn export_backup(context: &Context, dir: &Path) -> Result<()> {
dest_path.display(), dest_path.display(),
); );
let res = export_backup_inner(context, &temp_path).await; context
.sql
.export(&temp_db_path, passphrase)
.await
.with_context(|| format!("failed to backup plaintext database to {:?}", temp_db_path))?;
// we re-open the database after export is finished let res = export_backup_inner(context, &temp_db_path, &temp_path).await;
context.sql.open(context).await;
match &res { match &res {
Ok(_) => { Ok(_) => {
@@ -552,18 +595,21 @@ impl Drop for DeleteOnDrop {
fn drop(&mut self) { fn drop(&mut self) {
let file = self.0.clone(); let file = self.0.clone();
// Not using dc_delete_file() here because it would send a DeletedBlobFile event // Not using dc_delete_file() here because it would send a DeletedBlobFile event
async_std::task::block_on(async move { fs::remove_file(file).await.ok() }); async_std::task::block_on(fs::remove_file(file)).ok();
} }
} }
async fn export_backup_inner(context: &Context, temp_path: &PathBuf) -> Result<()> { async fn export_backup_inner(
context: &Context,
temp_db_path: &Path,
temp_path: &Path,
) -> Result<()> {
let file = File::create(temp_path).await?; let file = File::create(temp_path).await?;
let mut builder = async_tar::Builder::new(file); let mut builder = async_tar::Builder::new(file);
// append_path_with_name() wants the source path as the first argument, append_dir_all() wants it as the second argument.
builder builder
.append_path_with_name(context.get_dbfile(), DBFILE_BACKUP_NAME) .append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
.await?; .await?;
let read_dir: Vec<_> = fs::read_dir(context.get_blobdir()).await?.collect().await; let read_dir: Vec<_> = fs::read_dir(context.get_blobdir()).await?.collect().await;
@@ -842,12 +888,12 @@ mod tests {
async fn test_export_and_import_key() { async fn test_export_and_import_key() {
let context = TestContext::new_alice().await; let context = TestContext::new_alice().await;
let blobdir = context.ctx.get_blobdir(); let blobdir = context.ctx.get_blobdir();
if let Err(err) = imex(&context.ctx, ImexMode::ExportSelfKeys, blobdir).await { if let Err(err) = imex(&context.ctx, ImexMode::ExportSelfKeys, blobdir, None).await {
panic!("got error on export: {:?}", err); panic!("got error on export: {:?}", err);
} }
let context2 = TestContext::new_alice().await; let context2 = TestContext::new_alice().await;
if let Err(err) = imex(&context2.ctx, ImexMode::ImportSelfKeys, blobdir).await { if let Err(err) = imex(&context2.ctx, ImexMode::ImportSelfKeys, blobdir, None).await {
panic!("got error on import: {:?}", err); panic!("got error on import: {:?}", err);
} }
} }

View File

@@ -10,7 +10,7 @@ use std::time::Duration;
use anyhow::{bail, Context as _, Result}; use anyhow::{bail, Context as _, Result};
use async_std::path::PathBuf; use async_std::path::PathBuf;
use async_std::prelude::*; use async_std::prelude::*;
use rusqlite::{Connection, OpenFlags}; use rusqlite::{config::DbConfig, Connection, OpenFlags};
use crate::blob::BlobObject; use crate::blob::BlobObject;
use crate::chat::{add_device_msg, update_device_icon, update_saved_messages_icon}; use crate::chat::{add_device_msg, update_device_icon, update_saved_messages_icon};
@@ -43,11 +43,6 @@ pub struct Sql {
pub(crate) dbfile: PathBuf, pub(crate) dbfile: PathBuf,
pool: RwLock<Option<r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>>>, pool: RwLock<Option<r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>>>,
/// SQLCipher passphrase.
///
/// Empty string if database is not encrypted.
passphrase: RwLock<String>,
} }
impl Sql { impl Sql {
@@ -55,18 +50,17 @@ impl Sql {
Self { Self {
dbfile, dbfile,
pool: Default::default(), pool: Default::default(),
passphrase: Default::default(),
} }
} }
/// Sets SQLCipher passphrase for key derivation. /// Tests SQLCipher passphrase.
/// ///
/// Returns true if passphrase is correct, i.e. the database is new or can be unlocked with /// Returns true if passphrase is correct, i.e. the database is new or can be unlocked with
/// this passphrase, and false if the database is already encrypted with another passphrase or /// this passphrase, and false if the database is already encrypted with another passphrase or
/// corrupted. /// corrupted.
/// ///
/// Fails if database is already open. /// Fails if database is already open.
pub async fn set_passphrase(&self, passphrase: String) -> Result<bool> { pub async fn check_passphrase(&self, passphrase: String) -> Result<bool> {
if self.is_open().await { if self.is_open().await {
bail!("Database is already opened."); bail!("Database is already opened.");
} }
@@ -83,10 +77,6 @@ impl Sql {
.query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(())) .query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(()))
.is_ok(); .is_ok();
if key_is_correct {
*self.passphrase.write().await = passphrase;
}
Ok(key_is_correct) Ok(key_is_correct)
} }
@@ -96,11 +86,67 @@ impl Sql {
} }
/// Closes all underlying Sqlite connections. /// Closes all underlying Sqlite connections.
pub async fn close(&self) { async fn close(&self) {
let _ = self.pool.write().await.take(); let _ = self.pool.write().await.take();
// drop closes the connection // drop closes the connection
} }
/// Exports the database to a separate file with the given passphrase.
///
/// Set passphrase to empty string to export the database unencrypted.
pub(crate) async fn export(&self, path: &Path, passphrase: String) -> Result<()> {
let path_str = path
.to_str()
.with_context(|| format!("path {:?} is not valid unicode", path))?;
let conn = self.get_conn().await?;
conn.execute(
"ATTACH DATABASE ? AS backup KEY ?",
paramsv![path_str, passphrase],
)
.context("failed to attach backup database")?;
let res = conn
.query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
.context("failed to export to attached backup database");
conn.execute("DETACH DATABASE backup", [])
.context("failed to detach backup database")?;
res?;
Ok(())
}
/// Imports the database from a separate file with the given passphrase.
pub(crate) async fn import(&self, path: &Path, passphrase: String) -> Result<()> {
let path_str = path
.to_str()
.with_context(|| format!("path {:?} is not valid unicode", path))?;
let conn = self.get_conn().await?;
// Reset the database without reopening it. We don't want to reopen the database because we
// don't have main database passphrase at this point.
// See <https://sqlite.org/c3ref/c_dbconfig_enable_fkey.html> for documentation.
// Without resetting import may fail due to existing tables.
conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)
.context("failed to set SQLITE_DBCONFIG_RESET_DATABASE")?;
conn.execute("VACUUM", [])
.context("failed to vacuum the database")?;
conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)
.context("failed to unset SQLITE_DBCONFIG_RESET_DATABASE")?;
conn.execute(
"ATTACH DATABASE ? AS backup KEY ?",
paramsv![path_str, passphrase],
)
.context("failed to attach backup database")?;
let res = conn
.query_row("SELECT sqlcipher_export('main', 'backup')", [], |_row| {
Ok(())
})
.context("failed to import from attached backup database");
conn.execute("DETACH DATABASE backup", [])
.context("failed to detach backup database")?;
res?;
Ok(())
}
fn new_pool( fn new_pool(
dbfile: &Path, dbfile: &Path,
passphrase: String, passphrase: String,
@@ -231,7 +277,7 @@ impl Sql {
/// Opens the provided database and runs any necessary migrations. /// Opens the provided database and runs any necessary migrations.
/// If a database is already open, this will return an error. /// If a database is already open, this will return an error.
pub async fn open(&self, context: &Context) -> Result<()> { pub async fn open(&self, context: &Context, passphrase: String) -> Result<()> {
if self.is_open().await { if self.is_open().await {
error!( error!(
context, context,
@@ -240,13 +286,7 @@ impl Sql {
bail!("SQL database is already opened."); bail!("SQL database is already opened.");
} }
let passphrase_lock = self.passphrase.read().await; if let Err(err) = self.try_open(context, &self.dbfile, passphrase).await {
let passphrase: &str = passphrase_lock.as_ref();
if let Err(err) = self
.try_open(context, &self.dbfile, passphrase.to_string())
.await
{
self.close().await; self.close().await;
Err(err) Err(err)
} else { } else {
@@ -828,7 +868,7 @@ mod tests {
t.sql.close().await; t.sql.close().await;
housekeeping(&t).await.unwrap_err(); // housekeeping should fail as the db is closed housekeeping(&t).await.unwrap_err(); // housekeeping should fail as the db is closed
t.sql.open(&t).await.unwrap(); t.sql.open(&t, "".to_string()).await.unwrap();
let a = t.get_config(Config::Selfavatar).await.unwrap().unwrap(); let a = t.get_config(Config::Selfavatar).await.unwrap().unwrap();
assert_eq!(avatar_bytes, &async_std::fs::read(&a).await.unwrap()[..]); assert_eq!(avatar_bytes, &async_std::fs::read(&a).await.unwrap()[..]);
@@ -871,11 +911,11 @@ mod tests {
let sql = Sql::new(dbfile.into()); let sql = Sql::new(dbfile.into());
// Create database with all the tables. // Create database with all the tables.
sql.open(&t).await.unwrap(); sql.open(&t, "".to_string()).await.unwrap();
sql.close().await; sql.close().await;
// Reopen the database // Reopen the database
sql.open(&t).await?; sql.open(&t, "".to_string()).await?;
sql.execute( sql.execute(
"INSERT INTO config (keyname, value) VALUES (?, ?);", "INSERT INTO config (keyname, value) VALUES (?, ?);",
paramsv!("foo", "bar"), paramsv!("foo", "bar"),
@@ -930,7 +970,7 @@ mod tests {
} }
#[async_std::test] #[async_std::test]
async fn test_set_passphrase() -> Result<()> { async fn test_check_passphrase() -> Result<()> {
use tempfile::tempdir; use tempfile::tempdir;
// The context is used only for logging. // The context is used only for logging.
@@ -941,8 +981,8 @@ mod tests {
let dbfile = dir.path().join("testdb.sqlite"); let dbfile = dir.path().join("testdb.sqlite");
let sql = Sql::new(dbfile.clone().into()); let sql = Sql::new(dbfile.clone().into());
sql.set_passphrase("foo".to_string()).await?; sql.check_passphrase("foo".to_string()).await?;
sql.open(&t) sql.open(&t, "foo".to_string())
.await .await
.context("failed to open the database first time")?; .context("failed to open the database first time")?;
sql.close().await; sql.close().await;
@@ -951,11 +991,11 @@ mod tests {
let sql = Sql::new(dbfile.into()); let sql = Sql::new(dbfile.into());
// Test that we can't open encrypted database without a passphrase. // Test that we can't open encrypted database without a passphrase.
assert!(sql.open(&t).await.is_err()); assert!(sql.open(&t, "".to_string()).await.is_err());
// Now set the passphrase and open the database, it should succeed. // Now open the database with passphrase, it should succeed.
sql.set_passphrase("foo".to_string()).await?; sql.check_passphrase("foo".to_string()).await?;
sql.open(&t) sql.open(&t, "foo".to_string())
.await .await
.context("failed to open the database second time")?; .context("failed to open the database second time")?;
Ok(()) Ok(())