mirror of
https://github.com/chatmail/core.git
synced 2026-04-18 22:16:30 +03:00
feat: improve internal sql interface
Switches from rusqlite to sqlx to have a fully async based interface to sqlite. Co-authored-by: B. Petersen <r10s@b44t.com> Co-authored-by: Hocuri <hocuri@gmx.de> Co-authored-by: link2xt <link2xt@testrun.org>
This commit is contained in:
committed by
dignifiedquire
parent
4dedc2d8ce
commit
6bb5721f29
19
src/sql/error.rs
Normal file
19
src/sql/error.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("Sqlx: {0:?}")]
|
||||
Sqlx(#[from] sqlx::Error),
|
||||
#[error("Sqlite: Connection closed")]
|
||||
SqlNoConnection,
|
||||
#[error("Sqlite: Already open")]
|
||||
SqlAlreadyOpen,
|
||||
#[error("Sqlite: Failed to open")]
|
||||
SqlFailedToOpen,
|
||||
#[error("{0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
// #[error("{0:?}")]
|
||||
// BlobError(#[from] crate::blob::BlobError),
|
||||
#[error("{0}")]
|
||||
Other(#[from] anyhow::Error),
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
505
src/sql/migrations.rs
Normal file
505
src/sql/migrations.rs
Normal file
@@ -0,0 +1,505 @@
|
||||
use async_std::prelude::*;
|
||||
|
||||
use super::{Result, Sql};
|
||||
use crate::config::Config;
|
||||
use crate::constants::ShowEmails;
|
||||
use crate::context::Context;
|
||||
use crate::dc_tools::EmailAddress;
|
||||
use crate::imap;
|
||||
use crate::provider::get_provider_by_domain;
|
||||
|
||||
const DBVERSION: i32 = 68;
|
||||
const VERSION_CFG: &str = "dbversion";
|
||||
const TABLES: &str = include_str!("./tables.sql");
|
||||
|
||||
pub async fn run(context: &Context, sql: &Sql) -> Result<(bool, bool, bool)> {
|
||||
let mut recalc_fingerprints = false;
|
||||
let mut exists_before_update = false;
|
||||
let mut dbversion_before_update = DBVERSION;
|
||||
|
||||
if !sql.table_exists("config").await? {
|
||||
info!(context, "First time init: creating tables",);
|
||||
sql.transaction(move |conn| {
|
||||
Box::pin(async move {
|
||||
sqlx::query(TABLES)
|
||||
.execute_many(&mut *conn)
|
||||
.await
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.await?;
|
||||
|
||||
// set raw config inside the transaction
|
||||
sqlx::query("INSERT INTO config (keyname, value) VALUES (?, ?);")
|
||||
.bind(VERSION_CFG)
|
||||
.bind(format!("{}", dbversion_before_update))
|
||||
.execute(&mut *conn)
|
||||
.await?;
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
} else {
|
||||
exists_before_update = true;
|
||||
dbversion_before_update = sql
|
||||
.get_raw_config_int(VERSION_CFG)
|
||||
.await?
|
||||
.unwrap_or_default();
|
||||
}
|
||||
|
||||
let dbversion = dbversion_before_update;
|
||||
let mut update_icons = !exists_before_update;
|
||||
let mut disable_server_delete = false;
|
||||
|
||||
if dbversion < 1 {
|
||||
info!(context, "[migration] v1");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
CREATE TABLE leftgrps ( id INTEGER PRIMARY KEY, grpid TEXT DEFAULT '');
|
||||
CREATE INDEX leftgrps_index1 ON leftgrps (grpid);"#,
|
||||
1,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 2 {
|
||||
info!(context, "[migration] v2");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE contacts ADD COLUMN authname TEXT DEFAULT '';",
|
||||
2,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 7 {
|
||||
info!(context, "[migration] v7");
|
||||
sql.execute_migration(
|
||||
"CREATE TABLE keypairs (\
|
||||
id INTEGER PRIMARY KEY, \
|
||||
addr TEXT DEFAULT '' COLLATE NOCASE, \
|
||||
is_default INTEGER DEFAULT 0, \
|
||||
private_key, \
|
||||
public_key, \
|
||||
created INTEGER DEFAULT 0);",
|
||||
7,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 10 {
|
||||
info!(context, "[migration] v10");
|
||||
sql.execute_migration(
|
||||
"CREATE TABLE acpeerstates (\
|
||||
id INTEGER PRIMARY KEY, \
|
||||
addr TEXT DEFAULT '' COLLATE NOCASE, \
|
||||
last_seen INTEGER DEFAULT 0, \
|
||||
last_seen_autocrypt INTEGER DEFAULT 0, \
|
||||
public_key, \
|
||||
prefer_encrypted INTEGER DEFAULT 0); \
|
||||
CREATE INDEX acpeerstates_index1 ON acpeerstates (addr);",
|
||||
10,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 12 {
|
||||
info!(context, "[migration] v12");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
CREATE TABLE msgs_mdns ( msg_id INTEGER, contact_id INTEGER);
|
||||
CREATE INDEX msgs_mdns_index1 ON msgs_mdns (msg_id);"#,
|
||||
12,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 17 {
|
||||
info!(context, "[migration] v17");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE chats ADD COLUMN archived INTEGER DEFAULT 0;
|
||||
CREATE INDEX chats_index2 ON chats (archived);
|
||||
-- 'starred' column is not used currently
|
||||
-- (dropping is not easily doable and stop adding it will make reusing it complicated)
|
||||
ALTER TABLE msgs ADD COLUMN starred INTEGER DEFAULT 0;
|
||||
CREATE INDEX msgs_index5 ON msgs (starred);"#,
|
||||
17,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 18 {
|
||||
info!(context, "[migration] v18");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE acpeerstates ADD COLUMN gossip_timestamp INTEGER DEFAULT 0;
|
||||
ALTER TABLE acpeerstates ADD COLUMN gossip_key;"#,
|
||||
18,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 27 {
|
||||
info!(context, "[migration] v27");
|
||||
// chat.id=1 and chat.id=2 are the old deaddrops,
|
||||
// the current ones are defined by chats.blocked=2
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
DELETE FROM msgs WHERE chat_id=1 OR chat_id=2;"
|
||||
CREATE INDEX chats_contacts_index2 ON chats_contacts (contact_id);"
|
||||
ALTER TABLE msgs ADD COLUMN timestamp_sent INTEGER DEFAULT 0;")
|
||||
ALTER TABLE msgs ADD COLUMN timestamp_rcvd INTEGER DEFAULT 0;"#,
|
||||
27,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 34 {
|
||||
info!(context, "[migration] v34");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE msgs ADD COLUMN hidden INTEGER DEFAULT 0;
|
||||
ALTER TABLE msgs_mdns ADD COLUMN timestamp_sent INTEGER DEFAULT 0;
|
||||
ALTER TABLE acpeerstates ADD COLUMN public_key_fingerprint TEXT DEFAULT '';
|
||||
ALTER TABLE acpeerstates ADD COLUMN gossip_key_fingerprint TEXT DEFAULT '';
|
||||
CREATE INDEX acpeerstates_index3 ON acpeerstates (public_key_fingerprint);
|
||||
CREATE INDEX acpeerstates_index4 ON acpeerstates (gossip_key_fingerprint);"#,
|
||||
34,
|
||||
)
|
||||
.await?;
|
||||
recalc_fingerprints = true;
|
||||
}
|
||||
if dbversion < 39 {
|
||||
info!(context, "[migration] v39");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
CREATE TABLE tokens (
|
||||
id INTEGER PRIMARY KEY,
|
||||
namespc INTEGER DEFAULT 0,
|
||||
foreign_id INTEGER DEFAULT 0,
|
||||
token TEXT DEFAULT '',
|
||||
timestamp INTEGER DEFAULT 0
|
||||
);
|
||||
ALTER TABLE acpeerstates ADD COLUMN verified_key;
|
||||
ALTER TABLE acpeerstates ADD COLUMN verified_key_fingerprint TEXT DEFAULT '';
|
||||
CREATE INDEX acpeerstates_index5 ON acpeerstates (verified_key_fingerprint);"#,
|
||||
38,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 40 {
|
||||
info!(context, "[migration] v40");
|
||||
sql.execute_migration("ALTER TABLE jobs ADD COLUMN thread INTEGER DEFAULT 0;", 40)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 44 {
|
||||
info!(context, "[migration] v44");
|
||||
sql.execute_migration("ALTER TABLE msgs ADD COLUMN mime_headers TEXT;", 44)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 46 {
|
||||
info!(context, "[migration] v46");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE msgs ADD COLUMN mime_in_reply_to TEXT;
|
||||
ALTER TABLE msgs ADD COLUMN mime_references TEXT;"#,
|
||||
46,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 47 {
|
||||
info!(context, "[migration] v47");
|
||||
sql.execute_migration("ALTER TABLE jobs ADD COLUMN tries INTEGER DEFAULT 0;", 47)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 48 {
|
||||
info!(context, "[migration] v48");
|
||||
// NOTE: move_state is not used anymore
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE msgs ADD COLUMN move_state INTEGER DEFAULT 1;",
|
||||
48,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 49 {
|
||||
info!(context, "[migration] v49");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE chats ADD COLUMN gossiped_timestamp INTEGER DEFAULT 0;",
|
||||
49,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 50 {
|
||||
info!(context, "[migration] v50");
|
||||
// installations <= 0.100.1 used DC_SHOW_EMAILS_ALL implicitly;
|
||||
// keep this default and use DC_SHOW_EMAILS_NO
|
||||
// only for new installations
|
||||
if exists_before_update {
|
||||
sql.set_raw_config_int("show_emails", ShowEmails::All as i32)
|
||||
.await?;
|
||||
}
|
||||
sql.set_db_version(50).await?;
|
||||
}
|
||||
if dbversion < 53 {
|
||||
info!(context, "[migration] v53");
|
||||
// the messages containing _only_ locations
|
||||
// are also added to the database as _hidden_.
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
CREATE TABLE locations (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
latitude REAL DEFAULT 0.0,
|
||||
longitude REAL DEFAULT 0.0,
|
||||
accuracy REAL DEFAULT 0.0,
|
||||
timestamp INTEGER DEFAULT 0,
|
||||
chat_id INTEGER DEFAULT 0,
|
||||
from_id INTEGER DEFAULT 0
|
||||
);"
|
||||
CREATE INDEX locations_index1 ON locations (from_id);
|
||||
CREATE INDEX locations_index2 ON locations (timestamp);
|
||||
ALTER TABLE chats ADD COLUMN locations_send_begin INTEGER DEFAULT 0;
|
||||
ALTER TABLE chats ADD COLUMN locations_send_until INTEGER DEFAULT 0;
|
||||
ALTER TABLE chats ADD COLUMN locations_last_sent INTEGER DEFAULT 0;
|
||||
CREATE INDEX chats_index3 ON chats (locations_send_until);"#,
|
||||
53,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 54 {
|
||||
info!(context, "[migration] v54");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE msgs ADD COLUMN location_id INTEGER DEFAULT 0;
|
||||
CREATE INDEX msgs_index6 ON msgs (location_id);"#,
|
||||
54,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 55 {
|
||||
info!(context, "[migration] v55");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE locations ADD COLUMN independent INTEGER DEFAULT 0;",
|
||||
55,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 59 {
|
||||
info!(context, "[migration] v59");
|
||||
// records in the devmsglabels are kept when the message is deleted.
|
||||
// so, msg_id may or may not exist.
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
CREATE TABLE devmsglabels (id INTEGER PRIMARY KEY AUTOINCREMENT, label TEXT, msg_id INTEGER DEFAULT 0);",
|
||||
CREATE INDEX devmsglabels_index1 ON devmsglabels (label);"#, 59)
|
||||
.await?;
|
||||
if exists_before_update && sql.get_raw_config_int("bcc_self").await?.is_none() {
|
||||
sql.set_raw_config_int("bcc_self", 1).await?;
|
||||
}
|
||||
}
|
||||
|
||||
if dbversion < 60 {
|
||||
info!(context, "[migration] v60");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE chats ADD COLUMN created_timestamp INTEGER DEFAULT 0;",
|
||||
60,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 61 {
|
||||
info!(context, "[migration] v61");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE contacts ADD COLUMN selfavatar_sent INTEGER DEFAULT 0;",
|
||||
61,
|
||||
)
|
||||
.await?;
|
||||
update_icons = true;
|
||||
}
|
||||
if dbversion < 62 {
|
||||
info!(context, "[migration] v62");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE chats ADD COLUMN muted_until INTEGER DEFAULT 0;",
|
||||
62,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 63 {
|
||||
info!(context, "[migration] v63");
|
||||
sql.execute_migration("UPDATE chats SET grpid='' WHERE type=100", 63)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 64 {
|
||||
info!(context, "[migration] v64");
|
||||
sql.execute_migration("ALTER TABLE msgs ADD COLUMN error TEXT DEFAULT '';", 64)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 65 {
|
||||
info!(context, "[migration] v65");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE chats ADD COLUMN ephemeral_timer INTEGER;
|
||||
ALTER TABLE msgs ADD COLUMN ephemeral_timer INTEGER DEFAULT 0;
|
||||
ALTER TABLE msgs ADD COLUMN ephemeral_timestamp INTEGER DEFAULT 0;"#,
|
||||
65,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 66 {
|
||||
info!(context, "[migration] v66");
|
||||
update_icons = true;
|
||||
sql.set_db_version(66).await?;
|
||||
}
|
||||
if dbversion < 67 {
|
||||
info!(context, "[migration] v67");
|
||||
for prefix in &["", "configured_"] {
|
||||
if let Some(server_flags) = sql
|
||||
.get_raw_config_int(format!("{}server_flags", prefix))
|
||||
.await?
|
||||
{
|
||||
let imap_socket_flags = server_flags & 0x700;
|
||||
let key = format!("{}mail_security", prefix);
|
||||
match imap_socket_flags {
|
||||
0x100 => sql.set_raw_config_int(key, 2).await?, // STARTTLS
|
||||
0x200 => sql.set_raw_config_int(key, 1).await?, // SSL/TLS
|
||||
0x400 => sql.set_raw_config_int(key, 3).await?, // Plain
|
||||
_ => sql.set_raw_config_int(key, 0).await?,
|
||||
}
|
||||
let smtp_socket_flags = server_flags & 0x70000;
|
||||
let key = format!("{}send_security", prefix);
|
||||
match smtp_socket_flags {
|
||||
0x10000 => sql.set_raw_config_int(key, 2).await?, // STARTTLS
|
||||
0x20000 => sql.set_raw_config_int(key, 1).await?, // SSL/TLS
|
||||
0x40000 => sql.set_raw_config_int(key, 3).await?, // Plain
|
||||
_ => sql.set_raw_config_int(key, 0).await?,
|
||||
}
|
||||
}
|
||||
}
|
||||
sql.set_db_version(67).await?;
|
||||
}
|
||||
if dbversion < 68 {
|
||||
info!(context, "[migration] v68");
|
||||
// the index is used to speed up get_fresh_msg_cnt() (see comment there for more details) and marknoticed_chat()
|
||||
sql.execute_migration(
|
||||
"CREATE INDEX IF NOT EXISTS msgs_index7 ON msgs (state, hidden, chat_id);",
|
||||
68,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 69 {
|
||||
info!(context, "[migration] v69");
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE chats ADD COLUMN protected INTEGER DEFAULT 0;
|
||||
-- 120=group, 130=old verified group
|
||||
UPDATE chats SET protected=1, type=120 WHERE type=130;"#,
|
||||
69,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
if dbversion < 71 {
|
||||
info!(context, "[migration] v71");
|
||||
if let Some(addr) = context.get_config(Config::ConfiguredAddr).await? {
|
||||
if let Ok(domain) = addr.parse::<EmailAddress>().map(|email| email.domain) {
|
||||
context
|
||||
.set_config(
|
||||
Config::ConfiguredProvider,
|
||||
get_provider_by_domain(&domain).map(|provider| provider.id),
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
warn!(context, "Can't parse configured address: {:?}", addr);
|
||||
}
|
||||
}
|
||||
|
||||
sql.set_db_version(71).await?;
|
||||
}
|
||||
if dbversion < 72 {
|
||||
info!(context, "[migration] v72");
|
||||
if !sql.col_exists("msgs", "mime_modified").await? {
|
||||
sql.execute_migration(
|
||||
r#"
|
||||
ALTER TABLE msgs ADD COLUMN mime_modified INTEGER DEFAULT 0;"#,
|
||||
72,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
if dbversion < 73 {
|
||||
use Config::*;
|
||||
info!(context, "[migration] v73");
|
||||
sql.execute(
|
||||
r#"
|
||||
CREATE TABLE imap_sync (folder TEXT PRIMARY KEY, uidvalidity INTEGER DEFAULT 0, uid_next INTEGER DEFAULT 0);"#,
|
||||
)
|
||||
.await?;
|
||||
for c in &[
|
||||
ConfiguredInboxFolder,
|
||||
ConfiguredSentboxFolder,
|
||||
ConfiguredMvboxFolder,
|
||||
] {
|
||||
if let Some(folder) = context.get_config(*c).await? {
|
||||
let (uid_validity, last_seen_uid) =
|
||||
imap::get_config_last_seen_uid(context, &folder).await?;
|
||||
if last_seen_uid > 0 {
|
||||
imap::set_uid_next(context, &folder, last_seen_uid + 1).await?;
|
||||
imap::set_uidvalidity(context, &folder, uid_validity).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
if exists_before_update {
|
||||
disable_server_delete = true;
|
||||
|
||||
// Don't disable server delete if it was on by default (Nauta):
|
||||
if let Some(provider) = context.get_configured_provider().await? {
|
||||
if let Some(defaults) = &provider.config_defaults {
|
||||
if defaults.iter().any(|d| d.key == Config::DeleteServerAfter) {
|
||||
disable_server_delete = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sql.set_db_version(73).await?;
|
||||
}
|
||||
if dbversion < 74 {
|
||||
info!(context, "[migration] v74");
|
||||
sql.execute_migration("UPDATE contacts SET name='' WHERE name=authname", 74)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 75 {
|
||||
info!(context, "[migration] v75");
|
||||
sql.execute_migration(
|
||||
"ALTER TABLE contacts ADD COLUMN status TEXT DEFAULT '';",
|
||||
74,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
if dbversion < 76 {
|
||||
info!(context, "[migration] v76");
|
||||
sql.execute_migration("ALTER TABLE msgs ADD COLUMN subject TEXT DEFAULT '';", 76)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok((recalc_fingerprints, update_icons, disable_server_delete))
|
||||
}
|
||||
|
||||
impl Sql {
|
||||
async fn set_db_version(&self, version: i32) -> Result<()> {
|
||||
self.set_raw_config_int(VERSION_CFG, version).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn execute_migration(&self, query: &'static str, version: i32) -> Result<()> {
|
||||
let query = sqlx::query(query);
|
||||
self.transaction(move |conn| {
|
||||
Box::pin(async move {
|
||||
query
|
||||
.execute_many(&mut *conn)
|
||||
.await
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.await?;
|
||||
|
||||
// set raw config inside the transaction
|
||||
sqlx::query("UPDATE config SET value=? WHERE keyname=?;")
|
||||
.bind(format!("{}", version))
|
||||
.bind(VERSION_CFG)
|
||||
.execute(&mut *conn)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
866
src/sql/mod.rs
Normal file
866
src/sql/mod.rs
Normal file
@@ -0,0 +1,866 @@
|
||||
//! # SQLite wrapper
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context as _;
|
||||
use async_std::prelude::*;
|
||||
use async_std::sync::RwLock;
|
||||
use sqlx::{
|
||||
pool::PoolOptions,
|
||||
sqlite::{Sqlite, SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqliteSynchronous},
|
||||
Execute, Executor, Row,
|
||||
};
|
||||
|
||||
use crate::chat::{add_device_msg, update_device_icon, update_saved_messages_icon};
|
||||
use crate::config::Config;
|
||||
use crate::constants::{Viewtype, DC_CHAT_ID_TRASH};
|
||||
use crate::context::Context;
|
||||
use crate::dc_tools::{dc_delete_file, time};
|
||||
use crate::ephemeral::start_ephemeral_timers;
|
||||
use crate::message::Message;
|
||||
use crate::param::{Param, Params};
|
||||
use crate::peerstate::Peerstate;
|
||||
use crate::stock_str;
|
||||
|
||||
mod error;
|
||||
mod migrations;
|
||||
|
||||
pub use self::error::*;
|
||||
|
||||
/// A wrapper around the underlying Sqlite3 object.
|
||||
///
|
||||
/// We maintain two different pools to sqlite, on for reading, one for writing.
|
||||
/// This can go away once https://github.com/launchbadge/sqlx/issues/459 is implemented.
|
||||
#[derive(Debug)]
|
||||
pub struct Sql {
|
||||
/// Writer pool, must only have 1 connection in it.
|
||||
writer: RwLock<Option<SqlitePool>>,
|
||||
/// Reader pool, maintains multiple connections for reading data.
|
||||
reader: RwLock<Option<SqlitePool>>,
|
||||
}
|
||||
|
||||
impl Default for Sql {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
writer: RwLock::new(None),
|
||||
reader: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Sql {
|
||||
fn drop(&mut self) {
|
||||
async_std::task::block_on(self.close());
|
||||
}
|
||||
}
|
||||
|
||||
impl Sql {
|
||||
pub fn new() -> Sql {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Checks if there is currently a connection to the underlying Sqlite database.
|
||||
pub async fn is_open(&self) -> bool {
|
||||
// in read only mode the writer does not exists
|
||||
self.reader.read().await.is_some()
|
||||
}
|
||||
|
||||
/// Closes all underlying Sqlite connections.
|
||||
pub async fn close(&self) {
|
||||
if let Some(sql) = self.writer.write().await.take() {
|
||||
sql.close().await;
|
||||
}
|
||||
if let Some(sql) = self.reader.write().await.take() {
|
||||
sql.close().await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn new_writer_pool(dbfile: impl AsRef<Path>) -> sqlx::Result<SqlitePool> {
|
||||
let config = SqliteConnectOptions::new()
|
||||
.journal_mode(SqliteJournalMode::Wal)
|
||||
.filename(dbfile.as_ref())
|
||||
.read_only(false)
|
||||
.busy_timeout(Duration::from_secs(100))
|
||||
.create_if_missing(true)
|
||||
.statement_cache_capacity(0) // XXX workaround for https://github.com/launchbadge/sqlx/issues/1147
|
||||
.synchronous(SqliteSynchronous::Normal);
|
||||
|
||||
PoolOptions::<Sqlite>::new()
|
||||
.max_connections(1)
|
||||
.after_connect(|conn| {
|
||||
Box::pin(async move {
|
||||
let q = r#"
|
||||
PRAGMA secure_delete=on;
|
||||
PRAGMA temp_store=memory; -- Avoid SQLITE_IOERR_GETTEMPPATH errors on Android
|
||||
"#;
|
||||
|
||||
conn.execute_many(sqlx::query(q))
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.await?;
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.connect_with(config)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn new_reader_pool(dbfile: impl AsRef<Path>, readonly: bool) -> sqlx::Result<SqlitePool> {
|
||||
let config = SqliteConnectOptions::new()
|
||||
.journal_mode(SqliteJournalMode::Wal)
|
||||
.filename(dbfile.as_ref())
|
||||
.read_only(readonly)
|
||||
.busy_timeout(Duration::from_secs(100))
|
||||
.statement_cache_capacity(0) // XXX workaround for https://github.com/launchbadge/sqlx/issues/1147
|
||||
.synchronous(SqliteSynchronous::Normal);
|
||||
|
||||
PoolOptions::<Sqlite>::new()
|
||||
.max_connections(10)
|
||||
.after_connect(|conn| {
|
||||
Box::pin(async move {
|
||||
let q = r#"
|
||||
PRAGMA temp_store=memory; -- Avoid SQLITE_IOERR_GETTEMPPATH errors on Android
|
||||
PRAGMA query_only=1; -- Protect against writes even in read-write mode
|
||||
"#;
|
||||
|
||||
conn.execute_many(sqlx::query(q))
|
||||
.collect::<std::result::Result<Vec<_>, _>>()
|
||||
.await?;
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.connect_with(config)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Opens the provided database and runs any necessary migrations.
|
||||
/// If a database is already open, this will return an error.
|
||||
pub async fn open(
|
||||
&self,
|
||||
context: &Context,
|
||||
dbfile: impl AsRef<Path>,
|
||||
readonly: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if self.is_open().await {
|
||||
error!(
|
||||
context,
|
||||
"Cannot open, database \"{:?}\" already opened.",
|
||||
dbfile.as_ref(),
|
||||
);
|
||||
return Err(Error::SqlAlreadyOpen.into());
|
||||
}
|
||||
|
||||
// Open write pool
|
||||
if !readonly {
|
||||
*self.writer.write().await = Some(Self::new_writer_pool(&dbfile).await?);
|
||||
}
|
||||
|
||||
// Open read pool
|
||||
*self.reader.write().await = Some(Self::new_reader_pool(&dbfile, readonly).await?);
|
||||
|
||||
if !readonly {
|
||||
// (1) update low-level database structure.
|
||||
// this should be done before updates that use high-level objects that
|
||||
// rely themselves on the low-level structure.
|
||||
|
||||
let (recalc_fingerprints, update_icons, disable_server_delete) =
|
||||
migrations::run(context, self).await?;
|
||||
|
||||
// (2) updates that require high-level objects
|
||||
// the structure is complete now and all objects are usable
|
||||
|
||||
if recalc_fingerprints {
|
||||
info!(context, "[migration] recalc fingerprints");
|
||||
let mut rows = self.fetch("SELECT addr FROM acpeerstates;").await?;
|
||||
|
||||
while let Some(row) = rows.next().await {
|
||||
let row = row?;
|
||||
let addr = row.try_get(0)?;
|
||||
if let Some(ref mut peerstate) = Peerstate::from_addr(context, addr).await? {
|
||||
peerstate.recalc_fingerprint();
|
||||
peerstate.save_to_db(self, false).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if update_icons {
|
||||
update_saved_messages_icon(context).await?;
|
||||
update_device_icon(context).await?;
|
||||
}
|
||||
|
||||
if disable_server_delete {
|
||||
// We now always watch all folders and delete messages there if delete_server is enabled.
|
||||
// So, for people who have delete_server enabled, disable it and add a hint to the devicechat:
|
||||
if context.get_config_delete_server_after().await?.is_some() {
|
||||
let mut msg = Message::new(Viewtype::Text);
|
||||
msg.text = Some(stock_str::delete_server_turned_off(context).await);
|
||||
add_device_msg(context, None, Some(&mut msg)).await?;
|
||||
context
|
||||
.set_config(Config::DeleteServerAfter, Some("0"))
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!(context, "Opened {:?}.", dbfile.as_ref());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Execute the given query, returning the number of affected rows.
|
||||
pub async fn execute<'e, 'q, E>(&self, query: E) -> Result<u64>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let lock = self.writer.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let rows = pool.execute(query).await?;
|
||||
Ok(rows.rows_affected())
|
||||
}
|
||||
|
||||
/// Execute many queries.
|
||||
pub async fn execute_many<'e, 'q, E>(&self, query: E) -> Result<()>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let lock = self.writer.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
pool.execute_many(query)
|
||||
.collect::<sqlx::Result<Vec<_>>>()
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch the given query.
|
||||
pub async fn fetch<'e, 'q, E>(
|
||||
&self,
|
||||
query: E,
|
||||
) -> Result<impl Stream<Item = sqlx::Result<<Sqlite as sqlx::Database>::Row>> + 'e + Send>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let lock = self.reader.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let rows = pool.fetch(query);
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Fetch exactly one row, errors if no row is found.
|
||||
pub async fn fetch_one<'e, 'q, E>(&self, query: E) -> Result<<Sqlite as sqlx::Database>::Row>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let lock = self.reader.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let row = pool.fetch_one(query).await?;
|
||||
Ok(row)
|
||||
}
|
||||
|
||||
/// Fetches at most one row.
|
||||
pub async fn fetch_optional<'e, 'q, E>(
|
||||
&self,
|
||||
query: E,
|
||||
) -> Result<Option<<Sqlite as sqlx::Database>::Row>>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let lock = self.reader.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let row = pool.fetch_optional(query).await?;
|
||||
Ok(row)
|
||||
}
|
||||
|
||||
/// Used for executing `SELECT COUNT` statements only. Returns the resulting count.
|
||||
pub async fn count<'e, 'q, E>(&self, query: E) -> Result<usize>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let row = self.fetch_one(query).await?;
|
||||
let count: i64 = row.try_get(0)?;
|
||||
|
||||
Ok(usize::try_from(count).map_err::<anyhow::Error, _>(Into::into)?)
|
||||
}
|
||||
|
||||
/// Used for executing `SELECT COUNT` statements only. Returns `true`, if the count is at least
|
||||
/// one, `false` otherwise.
|
||||
pub async fn exists<'e, 'q, E>(&self, query: E) -> Result<bool>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
{
|
||||
let count = self.count(query).await?;
|
||||
Ok(count > 0)
|
||||
}
|
||||
|
||||
/// Execute the function inside a transaction.
|
||||
///
|
||||
/// If the function returns an error, the transaction will be rolled back. If it does not return an
|
||||
/// error, the transaction will be committed.
|
||||
pub async fn transaction<F, R>(&self, callback: F) -> Result<R>
|
||||
where
|
||||
F: for<'c> FnOnce(
|
||||
&'c mut sqlx::Transaction<'_, Sqlite>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<R>> + 'c + Send>>
|
||||
+ 'static
|
||||
+ Send
|
||||
+ Sync,
|
||||
R: Send,
|
||||
{
|
||||
let lock = self.writer.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let mut transaction = pool.begin().await?;
|
||||
let ret = callback(&mut transaction).await;
|
||||
|
||||
match ret {
|
||||
Ok(ret) => {
|
||||
transaction.commit().await?;
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
Err(err) => {
|
||||
transaction.rollback().await?;
|
||||
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Query the database if the requested table already exists.
|
||||
pub async fn table_exists(&self, name: impl AsRef<str>) -> Result<bool> {
|
||||
let q = format!("PRAGMA table_info(\"{}\")", name.as_ref());
|
||||
|
||||
let lock = self.reader.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let mut rows = pool.fetch(sqlx::query(&q));
|
||||
if let Some(first_row) = rows.next().await {
|
||||
Ok(first_row.is_ok())
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a column exists in a given table.
|
||||
pub async fn col_exists(
|
||||
&self,
|
||||
table_name: impl AsRef<str>,
|
||||
col_name: impl AsRef<str>,
|
||||
) -> Result<bool> {
|
||||
let q = format!("PRAGMA table_info(\"{}\")", table_name.as_ref());
|
||||
let lock = self.reader.read().await;
|
||||
let pool = lock.as_ref().ok_or(Error::SqlNoConnection)?;
|
||||
|
||||
let mut rows = pool.fetch(sqlx::query(&q));
|
||||
while let Some(row) = rows.next().await {
|
||||
let row = row?;
|
||||
|
||||
// `PRAGMA table_info` returns one row per column,
|
||||
// each row containing 0=cid, 1=name, 2=type, 3=notnull, 4=dflt_value
|
||||
|
||||
let curr_name: &str = row.try_get(1)?;
|
||||
if col_name.as_ref() == curr_name {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/// Executes a query which is expected to return one row and one
|
||||
/// column. If the query does not return a value or returns SQL
|
||||
/// `NULL`, returns `Ok(None)`.
|
||||
pub async fn query_get_value<'e, 'q, E, T>(&self, query: E) -> Result<Option<T>>
|
||||
where
|
||||
'q: 'e,
|
||||
E: 'q + Execute<'q, Sqlite>,
|
||||
T: for<'r> sqlx::Decode<'r, Sqlite> + sqlx::Type<Sqlite>,
|
||||
{
|
||||
let res = self
|
||||
.fetch_optional(query)
|
||||
.await?
|
||||
.map(|row| row.get::<T, _>(0));
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Set private configuration options.
|
||||
///
|
||||
/// Setting `None` deletes the value. On failure an error message
|
||||
/// will already have been logged.
|
||||
pub async fn set_raw_config(&self, key: impl AsRef<str>, value: Option<&str>) -> Result<()> {
|
||||
if !self.is_open().await {
|
||||
return Err(Error::SqlNoConnection);
|
||||
}
|
||||
|
||||
let key = key.as_ref();
|
||||
if let Some(value) = value {
|
||||
let exists = self
|
||||
.exists(sqlx::query("SELECT COUNT(*) FROM config WHERE keyname=?;").bind(key))
|
||||
.await?;
|
||||
|
||||
if exists {
|
||||
self.execute(
|
||||
sqlx::query("UPDATE config SET value=? WHERE keyname=?;")
|
||||
.bind(value)
|
||||
.bind(key),
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
self.execute(
|
||||
sqlx::query("INSERT INTO config (keyname, value) VALUES (?, ?);")
|
||||
.bind(key)
|
||||
.bind(value),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
} else {
|
||||
self.execute(sqlx::query("DELETE FROM config WHERE keyname=?;").bind(key))
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get configuration options from the database.
|
||||
pub async fn get_raw_config(&self, key: impl AsRef<str>) -> Result<Option<String>> {
|
||||
if !self.is_open().await || key.as_ref().is_empty() {
|
||||
return Err(Error::SqlNoConnection);
|
||||
}
|
||||
let value = self
|
||||
.query_get_value(
|
||||
sqlx::query("SELECT value FROM config WHERE keyname=?;").bind(key.as_ref()),
|
||||
)
|
||||
.await
|
||||
.context(format!("failed to fetch raw config: {}", key.as_ref()))?;
|
||||
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
pub async fn set_raw_config_int(&self, key: impl AsRef<str>, value: i32) -> Result<()> {
|
||||
self.set_raw_config(key, Some(&format!("{}", value))).await
|
||||
}
|
||||
|
||||
pub async fn get_raw_config_int(&self, key: impl AsRef<str>) -> Result<Option<i32>> {
|
||||
self.get_raw_config(key)
|
||||
.await
|
||||
.map(|s| s.and_then(|s| s.parse().ok()))
|
||||
}
|
||||
|
||||
pub async fn get_raw_config_bool(&self, key: impl AsRef<str>) -> Result<bool> {
|
||||
// Not the most obvious way to encode bool as string, but it is matter
|
||||
// of backward compatibility.
|
||||
let res = self.get_raw_config_int(key).await?;
|
||||
Ok(res.unwrap_or_default() > 0)
|
||||
}
|
||||
|
||||
pub async fn set_raw_config_bool<T>(&self, key: T, value: bool) -> Result<()>
|
||||
where
|
||||
T: AsRef<str>,
|
||||
{
|
||||
let value = if value { Some("1") } else { None };
|
||||
self.set_raw_config(key, value).await
|
||||
}
|
||||
|
||||
pub async fn set_raw_config_int64(&self, key: impl AsRef<str>, value: i64) -> Result<()> {
|
||||
self.set_raw_config(key, Some(&format!("{}", value))).await
|
||||
}
|
||||
|
||||
pub async fn get_raw_config_int64(&self, key: impl AsRef<str>) -> Result<Option<i64>> {
|
||||
self.get_raw_config(key)
|
||||
.await
|
||||
.map(|s| s.and_then(|r| r.parse().ok()))
|
||||
}
|
||||
|
||||
/// Alternative to sqlite3_last_insert_rowid() which MUST NOT be used due to race conditions, see comment above.
|
||||
/// the ORDER BY ensures, this function always returns the most recent id,
|
||||
/// eg. if a Message-ID is split into different messages.
|
||||
pub async fn get_rowid(
|
||||
&self,
|
||||
table: impl AsRef<str>,
|
||||
field: impl AsRef<str>,
|
||||
value: impl AsRef<str>,
|
||||
) -> Result<i64> {
|
||||
// alternative to sqlite3_last_insert_rowid() which MUST NOT be used due to race conditions, see comment above.
|
||||
// the ORDER BY ensures, this function always returns the most recent id,
|
||||
// eg. if a Message-ID is split into different messages.
|
||||
let query = format!(
|
||||
"SELECT id FROM {} WHERE {}=? ORDER BY id DESC",
|
||||
table.as_ref(),
|
||||
field.as_ref(),
|
||||
);
|
||||
|
||||
self.query_get_value(sqlx::query(&query).bind(value.as_ref()))
|
||||
.await
|
||||
.map(|id| id.unwrap_or_default())
|
||||
}
|
||||
|
||||
/// Fetches the rowid by restricting the rows through two different key, value settings.
|
||||
pub async fn get_rowid2(
|
||||
&self,
|
||||
table: impl AsRef<str>,
|
||||
field: impl AsRef<str>,
|
||||
value: i64,
|
||||
field2: impl AsRef<str>,
|
||||
value2: i64,
|
||||
) -> Result<i64> {
|
||||
let query = format!(
|
||||
"SELECT id FROM {} WHERE {}={} AND {}={} ORDER BY id DESC",
|
||||
table.as_ref(),
|
||||
field.as_ref(),
|
||||
value,
|
||||
field2.as_ref(),
|
||||
value2,
|
||||
);
|
||||
|
||||
self.query_get_value(sqlx::query(&query))
|
||||
.await
|
||||
.map(|id| id.unwrap_or_default())
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs periodic cleanup: deletes expired messages, removes blob-directory
/// files no longer referenced from the database, prunes message tombstones,
/// restarts ephemeral timers and records the run time in
/// `Config::LastHousekeeping`.
///
/// Individual cleanup steps log a warning on failure instead of aborting the
/// whole run; only database access errors propagate via `?`.
pub async fn housekeeping(context: &Context) -> Result<()> {
    if let Err(err) = crate::ephemeral::delete_expired_messages(context).await {
        warn!(context, "Failed to delete expired messages: {}", err);
    }

    // Collect every blob filename that is still referenced from the database.
    let mut files_in_use = HashSet::new();
    let mut unreferenced_count = 0;

    info!(context, "Start housekeeping...");
    // chat_id!=3 skips the trash chat (see tables.sql seed rows);
    // type!=10 presumably skips text-only messages — TODO confirm against
    // the Viewtype constants.
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM msgs WHERE chat_id!=3 AND type!=10;",
        Param::File,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM jobs;",
        Param::File,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM chats;",
        Param::ProfileImage,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM contacts;",
        Param::ProfileImage,
    )
    .await?;

    // Config values (e.g. the self avatar path) may reference blobs directly.
    let mut rows = context.sql.fetch("SELECT value FROM config;").await?;
    while let Some(row) = rows.next().await {
        let row: String = row?.try_get(0)?;
        maybe_add_file(&mut files_in_use, row);
    }

    info!(context, "{} files in use.", files_in_use.len(),);
    /* go through directory and delete unused files */
    let p = context.get_blobdir();
    match async_std::fs::read_dir(p).await {
        Ok(mut dir_handle) => {
            /* avoid deletion of files that are just created to build a message object */
            let diff = std::time::Duration::from_secs(60 * 60);
            let keep_files_newer_than = std::time::SystemTime::now().checked_sub(diff).unwrap();

            while let Some(entry) = dir_handle.next().await {
                if entry.is_err() {
                    break;
                }
                let entry = entry.unwrap();
                let name_f = entry.file_name();
                let name_s = name_f.to_string_lossy();

                // Derived files (previews, waveforms, in-creation markers)
                // share the base name of the blob they belong to.
                if is_file_in_use(&files_in_use, None, &name_s)
                    || is_file_in_use(&files_in_use, Some(".increation"), &name_s)
                    || is_file_in_use(&files_in_use, Some(".waveform"), &name_s)
                    || is_file_in_use(&files_in_use, Some("-preview.jpg"), &name_s)
                {
                    continue;
                }

                unreferenced_count += 1;

                // Keep files created/modified/accessed within the last hour:
                // they may belong to a message still being composed.
                if let Ok(stats) = async_std::fs::metadata(entry.path()).await {
                    let recently_created =
                        stats.created().is_ok() && stats.created().unwrap() > keep_files_newer_than;
                    let recently_modified = stats.modified().is_ok()
                        && stats.modified().unwrap() > keep_files_newer_than;
                    let recently_accessed = stats.accessed().is_ok()
                        && stats.accessed().unwrap() > keep_files_newer_than;

                    if recently_created || recently_modified || recently_accessed {
                        info!(
                            context,
                            "Housekeeping: Keeping new unreferenced file #{}: {:?}",
                            unreferenced_count,
                            entry.file_name(),
                        );
                        continue;
                    }
                }
                info!(
                    context,
                    "Housekeeping: Deleting unreferenced file #{}: {:?}",
                    unreferenced_count,
                    entry.file_name()
                );
                let path = entry.path();
                dc_delete_file(context, path).await;
            }
        }
        Err(err) => {
            warn!(
                context,
                "Housekeeping: Cannot open {}. ({})",
                context.get_blobdir().display(),
                err
            );
        }
    }

    if let Err(err) = start_ephemeral_timers(context).await {
        warn!(
            context,
            "Housekeeping: cannot start ephemeral timers: {}", err
        );
    }

    if let Err(err) = prune_tombstones(&context.sql).await {
        warn!(
            context,
            "Housekeeping: Cannot prune message tombstones: {}", err
        );
    }

    // Remember when housekeeping last ran so callers can throttle it.
    if let Err(e) = context
        .set_config(Config::LastHousekeeping, Some(&time().to_string()))
        .await
    {
        warn!(context, "Can't set config: {}", e);
    }

    info!(context, "Housekeeping done.");
    Ok(())
}
|
||||
|
||||
/// Checks whether `name` refers to an entry in `files_in_use`.
///
/// When `namespc_opt` is given, `name` must end with that suffix and the
/// lookup is performed on the remaining stem; a name consisting of the
/// suffix alone (empty stem) never matches.
fn is_file_in_use(files_in_use: &HashSet<String>, namespc_opt: Option<&str>, name: &str) -> bool {
    let name_to_check = match namespc_opt {
        Some(namespc) => match name.strip_suffix(namespc) {
            // `strip_suffix` replaces the former manual `ends_with` +
            // length-check + slicing (which required an
            // `#[allow(clippy::indexing_slicing)]`); rejecting the empty
            // stem mirrors the original `name_len <= namespc_len` guard.
            Some(stem) if !stem.is_empty() => stem,
            _ => return false,
        },
        None => name,
    };
    files_in_use.contains(name_to_check)
}
|
||||
|
||||
/// Records `file` in `files_in_use` if it points into the blob directory.
///
/// Only paths carrying the `$BLOBDIR/` prefix are tracked; the prefix is
/// stripped before insertion. Everything else is ignored.
fn maybe_add_file(files_in_use: &mut HashSet<String>, file: impl AsRef<str>) {
    let candidate = file.as_ref();
    let blob_name = match candidate.strip_prefix("$BLOBDIR/") {
        Some(name) => name,
        None => return,
    };
    files_in_use.insert(blob_name.to_string());
}
|
||||
|
||||
async fn maybe_add_from_param(
|
||||
sql: &Sql,
|
||||
files_in_use: &mut HashSet<String>,
|
||||
query: &str,
|
||||
param_id: Param,
|
||||
) -> Result<()> {
|
||||
let mut rows = sql.fetch(query).await?;
|
||||
while let Some(row) = rows.next().await {
|
||||
let row: String = row?.try_get(0)?;
|
||||
let param: Params = row.parse().unwrap_or_default();
|
||||
if let Some(file) = param.get(param_id) {
|
||||
maybe_add_file(files_in_use, file);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes from the database locally deleted messages that also don't
|
||||
/// have a server UID.
|
||||
async fn prune_tombstones(sql: &Sql) -> Result<()> {
|
||||
sql.execute(
|
||||
sqlx::query(
|
||||
"DELETE FROM msgs \
|
||||
WHERE (chat_id = ? OR hidden) \
|
||||
AND server_uid = 0",
|
||||
)
|
||||
.bind(DC_CHAT_ID_TRASH),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the SQLite version as a string; e.g., `"3.16.2"` for version 3.16.2.
pub fn version() -> &'static str {
    #[allow(unsafe_code)]
    // SAFETY: sqlite3_libversion() returns a pointer to a static,
    // NUL-terminated version string that stays valid for the lifetime of
    // the process, so borrowing it as `&'static` is sound.
    let cstr = unsafe { std::ffi::CStr::from_ptr(libsqlite3_sys::sqlite3_libversion()) };
    cstr.to_str()
        .expect("SQLite version string is not valid UTF8 ?!")
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use async_std::fs::File;

    use crate::config::Config;
    use crate::{test_utils::TestContext, Event, EventType};

    use super::*;

    #[test]
    fn test_maybe_add_file() {
        let mut files = Default::default();
        maybe_add_file(&mut files, "$BLOBDIR/hello");
        maybe_add_file(&mut files, "$BLOBDIR/world.txt");
        maybe_add_file(&mut files, "world2.txt");
        maybe_add_file(&mut files, "$BLOBDIR");

        assert!(files.contains("hello"));
        assert!(files.contains("world.txt"));
        assert!(!files.contains("world2.txt"));
        assert!(!files.contains("$BLOBDIR"));
    }

    #[test]
    fn test_is_file_in_use() {
        let mut files = Default::default();
        maybe_add_file(&mut files, "$BLOBDIR/hello");
        maybe_add_file(&mut files, "$BLOBDIR/world.txt");
        maybe_add_file(&mut files, "world2.txt");

        assert!(is_file_in_use(&files, None, "hello"));
        assert!(!is_file_in_use(&files, Some(".txt"), "hello"));
        assert!(is_file_in_use(&files, Some("-suffix"), "world.txt-suffix"));
    }

    #[async_std::test]
    async fn test_table_exists() {
        let t = TestContext::new().await;
        assert!(t.ctx.sql.table_exists("msgs").await.unwrap());
        assert!(!t.ctx.sql.table_exists("foobar").await.unwrap());
    }

    #[async_std::test]
    async fn test_col_exists() {
        let t = TestContext::new().await;
        assert!(t.ctx.sql.col_exists("msgs", "mime_modified").await.unwrap());
        assert!(!t.ctx.sql.col_exists("msgs", "foobar").await.unwrap());
        assert!(!t.ctx.sql.col_exists("foobar", "foobar").await.unwrap());
    }

    #[async_std::test]
    async fn test_housekeeping_db_closed() {
        let t = TestContext::new().await;

        let avatar_src = t.dir.path().join("avatar.png");
        let avatar_bytes = include_bytes!("../../test-data/image/avatar64x64.png");
        File::create(&avatar_src)
            .await
            .unwrap()
            .write_all(avatar_bytes)
            .await
            .unwrap();
        t.set_config(Config::Selfavatar, Some(avatar_src.to_str().unwrap()))
            .await
            .unwrap();

        t.add_event_sink(move |event: Event| async move {
            match event.typ {
                EventType::Info(s) => assert!(
                    !s.contains("Keeping new unreferenced file"),
                    "File {} was almost deleted, only reason it was kept is that it was created recently (as the tests don't run for a long time)",
                    s
                ),
                // `panic!(s)` with a non-literal argument is deprecated in
                // edition 2018 and a hard error in 2021; forward the message
                // through an explicit format string instead.
                EventType::Error(s) => panic!("{}", s),
                _ => {}
            }
        })
        .await;

        let a = t.get_config(Config::Selfavatar).await.unwrap().unwrap();
        assert_eq!(avatar_bytes, &async_std::fs::read(&a).await.unwrap()[..]);

        t.sql.close().await;
        housekeeping(&t).await.unwrap_err(); // housekeeping should fail as the db is closed
        t.sql.open(&t, &t.get_dbfile(), false).await.unwrap();

        let a = t.get_config(Config::Selfavatar).await.unwrap().unwrap();
        assert_eq!(avatar_bytes, &async_std::fs::read(&a).await.unwrap()[..]);
    }

    /// Regression test.
    ///
    /// Previously the code checking for existence of `config` table
    /// checked it with `PRAGMA table_info("config")` but did not
    /// drain `SqlitePool.fetch` result, only using the first row
    /// returned. As a result, prepared statement for `PRAGMA` was not
    /// finalized early enough, leaving reader connection in a broken
    /// state after reopening the database, when `config` table
    /// existed and `PRAGMA` returned non-empty result.
    ///
    /// Statements were not finalized due to a bug in sqlx:
    /// https://github.com/launchbadge/sqlx/issues/1147
    #[async_std::test]
    async fn test_db_reopen() -> Result<()> {
        use tempfile::tempdir;

        // The context is used only for logging.
        let t = TestContext::new().await;

        // Create a separate empty database for testing.
        let dir = tempdir()?;
        let dbfile = dir.path().join("testdb.sqlite");
        let sql = Sql::new();

        // Create database with all the tables.
        sql.open(&t, &dbfile, false).await.unwrap();
        sql.close().await;

        // Reopen the database
        sql.open(&t, &dbfile, false).await?;
        sql.execute(
            sqlx::query("INSERT INTO config (keyname, value) VALUES (?, ?);")
                .bind("foo")
                .bind("bar"),
        )
        .await?;

        let value: Option<String> = sql
            .query_get_value(sqlx::query("SELECT value FROM config WHERE keyname=?;").bind("foo"))
            .await?;
        assert_eq!(value.unwrap(), "bar");

        Ok(())
    }
}
|
||||
185
src/sql/tables.sql
Normal file
185
src/sql/tables.sql
Normal file
@@ -0,0 +1,185 @@
|
||||
-- Initial schema for a freshly created database (included via
-- include_str! and executed once when the `config` table is missing).
-- Incremental changes are applied afterwards by the migration code.

-- Generic key/value store for configuration values and internal
-- bookkeeping such as the schema version ("dbversion").
CREATE TABLE config (
  id INTEGER PRIMARY KEY,
  keyname TEXT,
  value TEXT
);
CREATE INDEX config_index1 ON config (keyname);

CREATE TABLE contacts (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT DEFAULT '',
  addr TEXT DEFAULT '' COLLATE NOCASE,
  origin INTEGER DEFAULT 0,
  blocked INTEGER DEFAULT 0,
  last_seen INTEGER DEFAULT 0,
  param TEXT DEFAULT '',
  authname TEXT DEFAULT '',
  selfavatar_sent INTEGER DEFAULT 0
);
CREATE INDEX contacts_index1 ON contacts (name COLLATE NOCASE);
CREATE INDEX contacts_index2 ON contacts (addr COLLATE NOCASE);
-- Reserve ids 1..9 for special contacts (1=self, 2=info, 5=device);
-- real contacts start above these due to AUTOINCREMENT.
INSERT INTO contacts (id,name,origin) VALUES
(1,'self',262144), (2,'info',262144), (3,'rsvd',262144),
(4,'rsvd',262144), (5,'device',262144), (6,'rsvd',262144),
(7,'rsvd',262144), (8,'rsvd',262144), (9,'rsvd',262144);

CREATE TABLE chats (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  type INTEGER DEFAULT 0,
  name TEXT DEFAULT '',
  draft_timestamp INTEGER DEFAULT 0,
  draft_txt TEXT DEFAULT '',
  blocked INTEGER DEFAULT 0,
  grpid TEXT DEFAULT '',
  param TEXT DEFAULT '',
  archived INTEGER DEFAULT 0,
  gossiped_timestamp INTEGER DEFAULT 0,
  locations_send_begin INTEGER DEFAULT 0,
  locations_send_until INTEGER DEFAULT 0,
  locations_last_sent INTEGER DEFAULT 0,
  created_timestamp INTEGER DEFAULT 0,
  muted_until INTEGER DEFAULT 0,
  ephemeral_timer INTEGER
);
CREATE INDEX chats_index1 ON chats (grpid);
CREATE INDEX chats_index2 ON chats (archived);
CREATE INDEX chats_index3 ON chats (locations_send_until);
-- Reserve ids 1..9 for special chats (1=deaddrop, 3=trash, 5=starred,
-- 6=archived-link); real chats start above these.
INSERT INTO chats (id,type,name) VALUES
(1,120,'deaddrop'), (2,120,'rsvd'), (3,120,'trash'),
(4,120,'msgs_in_creation'), (5,120,'starred'), (6,120,'archivedlink'),
(7,100,'rsvd'), (8,100,'rsvd'), (9,100,'rsvd');

-- Many-to-many membership relation between chats and contacts.
CREATE TABLE chats_contacts (chat_id INTEGER, contact_id INTEGER);
CREATE INDEX chats_contacts_index1 ON chats_contacts (chat_id);
CREATE INDEX chats_contacts_index2 ON chats_contacts (contact_id);

CREATE TABLE msgs (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  rfc724_mid TEXT DEFAULT '',
  server_folder TEXT DEFAULT '',
  server_uid INTEGER DEFAULT 0,
  chat_id INTEGER DEFAULT 0,
  from_id INTEGER DEFAULT 0,
  to_id INTEGER DEFAULT 0,
  timestamp INTEGER DEFAULT 0,
  type INTEGER DEFAULT 0,
  state INTEGER DEFAULT 0,
  msgrmsg INTEGER DEFAULT 1,
  bytes INTEGER DEFAULT 0,
  txt TEXT DEFAULT '',
  txt_raw TEXT DEFAULT '',
  param TEXT DEFAULT '',
  starred INTEGER DEFAULT 0,
  timestamp_sent INTEGER DEFAULT 0,
  timestamp_rcvd INTEGER DEFAULT 0,
  hidden INTEGER DEFAULT 0,
  mime_headers TEXT,
  mime_in_reply_to TEXT,
  mime_references TEXT,
  move_state INTEGER DEFAULT 1,
  location_id INTEGER DEFAULT 0,
  error TEXT DEFAULT '',

  -- Timer value in seconds. For incoming messages this
  -- timer starts when message is read, so we want to have
  -- the value stored here until the timer starts.
  ephemeral_timer INTEGER DEFAULT 0,

  -- Timestamp indicating when the message should be
  -- deleted. It is convenient to store it here because UI
  -- needs this value to display how much time is left until
  -- the message is deleted.
  ephemeral_timestamp INTEGER DEFAULT 0
);

CREATE INDEX msgs_index1 ON msgs (rfc724_mid);
CREATE INDEX msgs_index2 ON msgs (chat_id);
CREATE INDEX msgs_index3 ON msgs (timestamp);
CREATE INDEX msgs_index4 ON msgs (state);
CREATE INDEX msgs_index5 ON msgs (starred);
CREATE INDEX msgs_index6 ON msgs (location_id);
CREATE INDEX msgs_index7 ON msgs (state, hidden, chat_id);
-- Reserve ids 1..9 for marker/placeholder messages.
INSERT INTO msgs (id,msgrmsg,txt) VALUES
(1,0,'marker1'), (2,0,'rsvd'), (3,0,'rsvd'),
(4,0,'rsvd'), (5,0,'rsvd'), (6,0,'rsvd'), (7,0,'rsvd'),
(8,0,'rsvd'), (9,0,'daymarker');

-- Queue of pending background jobs (send, delete, move, ...).
CREATE TABLE jobs (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  added_timestamp INTEGER,
  desired_timestamp INTEGER DEFAULT 0,
  action INTEGER,
  foreign_id INTEGER,
  param TEXT DEFAULT '',
  thread INTEGER DEFAULT 0,
  tries INTEGER DEFAULT 0
);
CREATE INDEX jobs_index1 ON jobs (desired_timestamp);

-- Group ids the user has left.
CREATE TABLE leftgrps (
  id INTEGER PRIMARY KEY,
  grpid TEXT DEFAULT ''
);
CREATE INDEX leftgrps_index1 ON leftgrps (grpid);

-- Own OpenPGP key pairs. private_key/public_key are declared without a
-- type affinity (SQLite allows this); they hold key material.
CREATE TABLE keypairs (
  id INTEGER PRIMARY KEY,
  addr TEXT DEFAULT '' COLLATE NOCASE,
  is_default INTEGER DEFAULT 0,
  private_key,
  public_key,
  created INTEGER DEFAULT 0
);

-- Autocrypt peer state, one row per peer address.
CREATE TABLE acpeerstates (
  id INTEGER PRIMARY KEY,
  addr TEXT DEFAULT '' COLLATE NOCASE,
  last_seen INTEGER DEFAULT 0,
  last_seen_autocrypt INTEGER DEFAULT 0,
  public_key,
  prefer_encrypted INTEGER DEFAULT 0,
  gossip_timestamp INTEGER DEFAULT 0,
  gossip_key,
  public_key_fingerprint TEXT DEFAULT '',
  gossip_key_fingerprint TEXT DEFAULT '',
  verified_key,
  verified_key_fingerprint TEXT DEFAULT ''
);
CREATE INDEX acpeerstates_index1 ON acpeerstates (addr);
CREATE INDEX acpeerstates_index3 ON acpeerstates (public_key_fingerprint);
CREATE INDEX acpeerstates_index4 ON acpeerstates (gossip_key_fingerprint);
CREATE INDEX acpeerstates_index5 ON acpeerstates (verified_key_fingerprint);

-- Message delivery notifications (read receipts) per message/contact.
CREATE TABLE msgs_mdns (
  msg_id INTEGER,
  contact_id INTEGER,
  timestamp_sent INTEGER DEFAULT 0
);
CREATE INDEX msgs_mdns_index1 ON msgs_mdns (msg_id);

-- Tokens, e.g. for securejoin QR codes.
CREATE TABLE tokens (
  id INTEGER PRIMARY KEY,
  namespc INTEGER DEFAULT 0,
  foreign_id INTEGER DEFAULT 0,
  token TEXT DEFAULT '',
  timestamp INTEGER DEFAULT 0
);

-- Location-streaming positions.
CREATE TABLE locations (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  latitude REAL DEFAULT 0.0,
  longitude REAL DEFAULT 0.0,
  accuracy REAL DEFAULT 0.0,
  timestamp INTEGER DEFAULT 0,
  chat_id INTEGER DEFAULT 0,
  from_id INTEGER DEFAULT 0,
  independent INTEGER DEFAULT 0
);
CREATE INDEX locations_index1 ON locations (from_id);
CREATE INDEX locations_index2 ON locations (timestamp);

-- Labels for device messages to avoid adding them twice.
CREATE TABLE devmsglabels (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  label TEXT,
  msg_id INTEGER DEFAULT 0
);
CREATE INDEX devmsglabels_index1 ON devmsglabels (label);
||||
Reference in New Issue
Block a user