integrate backup sending via iroh into the repl

This commit is contained in:
dignifiedquire
2022-07-07 17:34:40 +02:00
parent 0e06bcb182
commit 9f7e962832
10 changed files with 100 additions and 36 deletions

View File

@@ -7,7 +7,7 @@ license = "MPL-2.0"
rust-version = "1.57"
[profile.dev]
debug = 0
#debug = 0
panic = 'abort'
opt-level = 1
@@ -86,6 +86,7 @@ async_zip = { version = "0.0.9", default-features = false, features = ["deflate"
iroh-share = { path = "../../iroh/iroh-share" }
iroh-resolver = { path = "../../iroh/iroh-resolver" }
tempfile = "3"
multibase = "0.9.1"
[dev-dependencies]
ansi_term = "0.12.0"
@@ -156,4 +157,4 @@ nightly = ["pgp/nightly"]
chacha20poly1305 = { path = "../../rustcrypto/AEADs/chacha20poly1305" }
chacha20 = { path = "../../rustcrypto/stream-ciphers/chacha20" }
pgp = { path = "../../rpgp/rpgp" }
libp2p = { git = "https://github.com/dignifiedquire/rust-libp2p", branch = "feat-kad-count" }
libp2p = { git = "https://github.com/dignifiedquire/rust-libp2p", branch = "feat-kad-count" }

View File

@@ -335,6 +335,7 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
continue-key-transfer <msg-id> <setup-code>\n\
has-backup\n\
export-backup\n\
send-backup\n\
import-backup <backup-file>\n\
export-keys\n\
import-keys\n\
@@ -477,6 +478,19 @@ pub async fn cmdline(context: Context, line: &str, chat_id: &mut ChatId) -> Resu
.await?;
println!("Exported to {}.", dir.to_string_lossy());
}
"send-backup" => {
let dir = dirs::home_dir().unwrap_or_default();
let (sender, transfer) =
send_backup(&context, dir.as_ref(), Some(arg2.to_string())).await?;
let ticket_bytes = transfer.ticket().as_bytes();
println!(
"Ticket: {}",
multibase::encode(multibase::Base::Base64, &ticket_bytes)
);
tokio::time::sleep(std::time::Duration::from_secs(100)).await;
sender.close().await?;
}
"import-backup" => {
ensure!(!arg1.is_empty(), "Argument <backup-file> missing.");
imex(

View File

@@ -222,7 +222,7 @@ impl<'a> BlobObject<'a> {
/// to be lowercase.
pub fn suffix(&self) -> Option<&str> {
let ext = self.name.rsplit('.').next();
if ext == Some(&self.name) {
if ext == Some(self.name.as_str()) {
None
} else {
ext

View File

@@ -455,7 +455,7 @@ async fn configure(ctx: &Context, param: &mut LoginParam) -> Result<()> {
progress!(ctx, 910);
if ctx.get_config(Config::ConfiguredAddr).await?.as_deref() != Some(&param.addr) {
if ctx.get_config(Config::ConfiguredAddr).await?.as_deref() != Some(param.addr.as_str()) {
// Switched account, all server UIDs we know are invalid
job::schedule_resync(ctx).await?;
}

View File

@@ -48,7 +48,7 @@ fn format_line_flowed(line: &str, prefix: &str) -> String {
after_space = false;
}
}
result + &buffer
result + buffer.as_str()
}
/// Returns text formatted according to RFC 3767 (format=flowed).

View File

@@ -61,7 +61,6 @@ pub enum ImexMode {
/// created by DC_IMEX_EXPORT_BACKUP and detected by imex_has_backup(). Importing a backup
/// is only possible as long as the context is not configured or used in another way.
ImportBackup = 12,
ExportBackupIroh = 13,
}
/// Import/export things.
@@ -107,6 +106,53 @@ pub async fn imex(
res
}
/// Starts sending a backup over iroh.
///
/// Registers an ongoing process (so the operation can be cancelled),
/// builds the backup and returns the iroh sender together with the
/// transfer handle; progress is reported via `ImexProgress` events.
pub async fn send_backup(
    context: &Context,
    path: &Path,
    passphrase: Option<String>,
) -> Result<(iroh_share::Sender, iroh_share::SenderTransfer)> {
    // Claim the "ongoing" slot; `cancel` resolves when the user aborts.
    let cancel = context.alloc_ongoing().await?;
    let res = send_backup_inner(context, path, passphrase)
        .race(async {
            cancel.recv().await.ok();
            Err(format_err!("canceled"))
        })
        .await;
    context.free_ongoing().await;
    match &res {
        Ok(_) => {
            info!(context, "IMEX successfully completed");
            context.emit_event(EventType::ImexProgress(1000));
        }
        Err(err) => {
            // Anyhow's .context() hides the inner error unless we format with {:#}.
            error!(context, "IMEX failed to complete: {:#}", err);
            context.emit_event(EventType::ImexProgress(0));
        }
    }
    res
}
/// Performs the actual backup export for [`send_backup`].
///
/// Ensures the database is open and a private key exists, creates the
/// target folder and hands off to `export_backup_iroh`.
async fn send_backup_inner(
    context: &Context,
    path: &Path,
    passphrase: Option<String>,
) -> Result<(iroh_share::Sender, iroh_share::SenderTransfer)> {
    info!(context, "Import/export dir: {}", path.display());
    ensure!(context.sql.is_open().await, "Database not opened.");
    context.emit_event(EventType::ImexProgress(10));

    // A private key must exist before anything can be exported.
    if e2ee::ensure_secret_key_exists(context).await.is_err() {
        bail!("Cannot create private key or private key not available.");
    }
    create_folder(context, &path).await?;

    export_backup_iroh(context, path, passphrase.unwrap_or_default()).await
}
/// Returns the filename of the backup found (otherwise an error)
pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
@@ -385,9 +431,6 @@ async fn imex_inner(
ImexMode::ExportBackup => {
export_backup(context, path, passphrase.unwrap_or_default()).await
}
ImexMode::ExportBackupIroh => {
export_backup_iroh(context, path, passphrase.unwrap_or_default()).await
}
ImexMode::ImportBackup => {
import_backup(context, path, passphrase.unwrap_or_default()).await?;
context.sql.run_migrations(context).await
@@ -612,7 +655,11 @@ async fn export_backup_inner(
Ok(())
}
async fn export_backup_iroh(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
async fn export_backup_iroh(
context: &Context,
dir: &Path,
passphrase: String,
) -> Result<(iroh_share::Sender, iroh_share::SenderTransfer)> {
// get a fine backup file name (the name includes the date so that multiple backup instances are possible)
let now = time();
let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, now)?;
@@ -652,36 +699,34 @@ async fn export_backup_iroh(context: &Context, dir: &Path, passphrase: String) -
let res = export_backup_iroh_inner(context, &temp_db_path, &temp_path).await;
match &res {
match res {
Ok(dir_builder) => {
let port = 9990;
let rpc_p2p_port = 5550;
let rpc_store_port = 5560;
// TODO: not tempfile
let sender_dir = tempfile::tempdir().unwrap();
let sender_db = sender_dir.path().join("db");
// TODO: cleanup
let sender_db = sender_dir.into_path().join("db");
let sender =
iroh_share::Sender::new(port, rpc_p2p_port, rpc_store_port, &sender_db).await?;
let transfer = sender.transfer_from_dir_builder(dir_builder).await?;
let ticket = transfer.ticket().await?;
let ticket_bytes = ticket.as_bytes();
context.emit_event(EventType::ImexBackupReady(ticket_bytes));
Ok((sender, transfer))
}
Err(e) => {
error!(context, "backup failed: {}", e);
Err(e)
}
}
res
}
async fn export_backup_iroh_inner(
context: &Context,
temp_db_path: &Path,
temp_path: &Path,
) -> Result<()> {
use iroh_resolver::unixfs_builder::*;
) -> Result<iroh_resolver::unixfs_builder::DirectoryBuilder> {
use iroh_resolver::unixfs_builder::{DirectoryBuilder, FileBuilder};
let mut dir_builder = DirectoryBuilder::new();
dir_builder.name(
@@ -691,11 +736,13 @@ async fn export_backup_iroh_inner(
.unwrap_or_default(),
);
let mut file = FileBuilder::new();
let db_content = tokio::fs::File::open(temp_db_path).await?;
file.name(DBFILE_BACKUP_NAME).content_reader(db_content);
{
let mut file = FileBuilder::new();
let db_content = tokio::fs::File::open(temp_db_path).await?;
file.name(DBFILE_BACKUP_NAME).content_reader(db_content);
dir_builder.add_file(file.build());
dir_builder.add_file(file.build().await?);
}
let read_dir: Vec<_> =
tokio_stream::wrappers::ReadDirStream::new(fs::read_dir(context.get_blobdir()).await?)
@@ -715,13 +762,14 @@ async fn export_backup_iroh_inner(
);
continue;
}
let mut file = File::open(entry.path()).await?;
let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(name);
let file_content = File::open(entry.path()).await?;
let mut file = FileBuilder::new();
file.name(name.to_string_lossy().to_owned());
file.content_reader(file);
dir_builder.add_file(file.build());
{
let mut file = FileBuilder::new();
file.name(name.to_string_lossy().to_owned());
file.content_reader(file_content);
dir_builder.add_file(file.build().await?);
}
written_files += 1;
let progress = 1000 * written_files / count;

View File

@@ -1285,19 +1285,20 @@ impl<'a> MimeFactory<'a> {
let extension_fields = if additional_msg_ids.is_empty() {
"".to_string()
} else {
"Additional-Message-IDs: ".to_string()
+ &additional_msg_ids
format!(
"Additional-Message-IDs: {}\r\n",
additional_msg_ids
.iter()
.map(|mid| render_rfc724_mid(mid))
.collect::<Vec<String>>()
.join(" ")
+ "\r\n"
)
};
message = message.child(
PartBuilder::new()
.content_type(&"message/disposition-notification".parse().unwrap())
.body(message_text2 + &extension_fields)
.body(message_text2 + extension_fields.as_str())
.build(),
);

View File

@@ -83,7 +83,7 @@ impl PlainText {
} else {
// normal, fixed text
if is_quote {
line = "<em>".to_owned() + &line + "</em>";
line = format!("<em>{}</em>", &line);
}
line += "<br/>\n";
}

View File

@@ -154,7 +154,7 @@ pub async fn get_provider_by_mx(context: &Context, domain: &str) -> Option<&'sta
}
let provider_fqdn = provider_domain.to_string() + ".";
let provider_fqdn_dot = ".".to_string() + &provider_fqdn;
let provider_fqdn_dot = format!(".{}", provider_fqdn);
for mx_domain in mx_domains.iter() {
let mx_domain = mx_domain.exchange().to_lowercase().to_utf8();

View File

@@ -10,7 +10,7 @@
// (to be only compatible with delta, only "[\r\n|\n]-- {0,2}[\r\n|\n]" needs to be replaced)
pub fn escape_message_footer_marks(text: &str) -> String {
if let Some(text) = text.strip_prefix("--") {
"-\u{200B}-".to_string() + &text.replace("\n--", "\n-\u{200B}-")
format!("-\u{200B}-{}", text.replace("\n--", "\n-\u{200B}-"))
} else {
text.replace("\n--", "\n-\u{200B}-")
}