diff --git a/.gitignore b/.gitignore index 773a38c4af..208e697955 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ .DS_Store target *.bak -workspace/server/public +# workspace/server/public diff --git a/Cargo.lock b/Cargo.lock index a5770b62d3..9d71650353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -152,6 +152,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-recursion" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.3" @@ -973,6 +984,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "filetime" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "windows-sys 0.36.1", +] + [[package]] name = "fixed-hash" version = "0.7.0" @@ -2790,6 +2813,8 @@ dependencies = [ name = "sos-client" version = "0.1.0" dependencies = [ + "async-recursion", + "async-trait", "base64", "clap", "dirs", @@ -2831,6 +2856,7 @@ dependencies = [ "chacha20poly1305", "chbs", "file-guard", + "filetime", "getrandom", "hex", "k256", @@ -2845,6 +2871,7 @@ dependencies = [ "tempfile", "thiserror", "time", + "tracing", "url", "uuid 1.1.0", "web3-signature", @@ -3072,6 +3099,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" dependencies = [ + "itoa", "libc", "num_threads", "serde", diff --git a/Makefile b/Makefile index 9956f3db16..b6ae6c7f78 100644 --- a/Makefile +++ b/Makefile @@ -1,28 +1,37 @@ -all: lint fmt check +dev-certs: + @cd sandbox && mkcert -key-file key.pem -cert-file cert.pem localhost 127.0.0.1 ::1 +.PHONY: dev-certs browser-gui: @rm -rf workspace/server/public @cd ../browser && make dist @cp -r ../browser/app/dist workspace/server/public - @rm -rf workspace/server/public/assets +.PHONY: browser-gui + +dev-server: + @cd workspace/server && cargo run -- -c ../../sandbox/config.toml server-release: browser-gui @cd workspace/server && cargo build --release +.PHONY: server-release fmt: @cargo fmt --all +.PHONY: fmt dev: @cargo test --all @cargo fmt --all +.PHONY: dev check: @cargo check --all +.PHONY: check test: @cargo test --all +.PHONY: test docs: @cargo doc --all --open --no-deps - -.PHONY: all browser-gui server-release fmt dev check test docs +.PHONY: docs diff --git a/README.md b/README.md index 73ec9c42ed..755fcf415c 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,50 @@ # Save Our Secrets Library -> A distributed, encrypted database with a focus on security, integrity and redundancy. +> Save our Secrets (SOS) is a cross-platform, distributed, encrypted database with a focus on security, integrity and redundancy. It can be used to store private secrets such as account passwords, notes, certificates and encryption keys. -Used to store private secrets such as account passwords, notes, certificates and encryption keys. +This repository contains the core library code and several command line interface (CLI) tools. + +* [sandbox](/sandbox) Configuration and storage location for local testing. +* [workspace](/workspace) Libraries and command line interfaces. + * [audit](/workspace/audit) The `sos-audit` tool for reading and monitoring audit logs. 
+ * [check](/workspace/check) The `sos-check` tool for verifying file integrity and inspecting files. + * [client](/workspace/client) Client library and the `sos-client` terminal read-eval-print-loop (REPL) tool. + * [core](/workspace/core) Core library including types and traits common to the client and server implementations. + * [readline](/workspace/readline) Utility functions for reading from stdin, used by the terminal REPL client. + * [server](/workspace/server) Server command line interface. + +For webassembly bindings see the [browser][] repository. ## Server +### Certificates + The server requires TLS so a certificate and key file must be configured. -For local development use [mkcert][] in the sandbox directory: +For local development use [mkcert][] in the sandbox directory, first install the executable and then create the certificate authority: ``` mkcert -install -cd sandbox && mkcert -key-file key.pem -cert-file cert.pem localhost 127.0.0.1 ::1 ``` +Afterwards create certificates for local servers in the sandbox directory: + +``` +make dev-certs +``` + +### Web GUI + The server bundles a web-based GUI from the browser webapp code so to run the server CLI tool you must have the [browser][] repository as a sibling folder of this repository and then you can build the public folder containing the bundled assets: ``` make browser-gui ``` -Now you can start a development version of the server: +Now you can start a development version of the server using the [sandbox configuration](/sandbox/config.toml): ``` -cd workspace/server -cargo run -- -c ../../sandbox/config.toml +make dev-server ``` Accounts and vaults will be created in the sandbox directory. diff --git a/workspace/audit/src/iter.rs b/workspace/audit/src/iter.rs index 3d0cc8eb39..818696e8e5 100644 --- a/workspace/audit/src/iter.rs +++ b/workspace/audit/src/iter.rs @@ -2,12 +2,13 @@ use std::path::Path; use sos_core::{ + constants::AUDIT_IDENTITY, events::AuditEvent, - file_identity::{FileIdentity, AUDIT_IDENTITY}, serde_binary::{ binary_rw::{BinaryReader, Endian, FileStream, OpenType, SeekStream}, Decode, Deserializer, }, + FileIdentity, }; use crate::{Error, Result}; diff --git a/workspace/audit/src/lib.rs b/workspace/audit/src/lib.rs index 9380e82228..e43cca0883 100644 --- a/workspace/audit/src/lib.rs +++ b/workspace/audit/src/lib.rs @@ -27,7 +27,7 @@ pub fn logs(audit_log: PathBuf, json: bool) -> Result<()> { AuditData::Vault(vault_id) => { tracing::info!( "{} {} by {} (vault = {})", - log.time, + log.time.to_rfc3339()?, log.operation, log.address, vault_id, @@ -36,7 +36,7 @@ pub fn logs(audit_log: PathBuf, json: bool) -> Result<()> { AuditData::Secret(vault_id, secret_id) => { tracing::info!( "{} {} by {} (vault = {}, secret = {})", - log.time, + log.time.to_rfc3339()?, log.operation, log.address, vault_id, @@ -47,7 +47,7 @@ pub fn logs(audit_log: PathBuf, json: bool) -> Result<()> { } else { tracing::info!( "{} {} by {}", - log.time, + log.time.to_rfc3339()?, log.operation, log.address, ); diff --git a/workspace/audit/src/log_file.rs b/workspace/audit/src/log_file.rs index eb1cba6945..1b3681f984 100644 --- a/workspace/audit/src/log_file.rs +++ b/workspace/audit/src/log_file.rs @@ -4,8 +4,8 @@ use tokio::{fs::File, io::AsyncWriteExt}; use crate::Result; use sos_core::{ + constants::AUDIT_IDENTITY, events::{AuditEvent, AuditProvider}, - file_identity::{FileIdentity, AUDIT_IDENTITY}, vault::encode, }; @@ -38,9 +38,7 @@ impl AuditLogFile { let size = file.metadata()?.len(); if size == 0 { - let identity = 
FileIdentity(AUDIT_IDENTITY); - let buffer = encode(&identity)?; - file.write_all(&buffer)?; + file.write_all(&AUDIT_IDENTITY)?; } Ok(file) @@ -50,11 +48,15 @@ impl AuditLogFile { #[async_trait] impl AuditProvider for AuditLogFile { type Error = crate::Error; - async fn append_audit_event( + async fn append_audit_events( &mut self, - log: AuditEvent, + events: &[AuditEvent], ) -> std::result::Result<(), Self::Error> { - let buffer = encode(&log)?; + let mut buffer = Vec::new(); + for event in events { + let mut event = encode(event)?; + buffer.append(&mut event); + } self.file.write_all(&buffer).await?; Ok(()) } diff --git a/workspace/check/src/lib.rs b/workspace/check/src/lib.rs index b5341e246a..71db3fc0b7 100644 --- a/workspace/check/src/lib.rs +++ b/workspace/check/src/lib.rs @@ -9,8 +9,7 @@ pub use error::Error; use sos_core::{ binary_rw::{FileStream, OpenType}, commit_tree::{ - integrity::{vault_commit_tree, wal_commit_tree}, - CommitTree, RowIterator, + vault_commit_tree, wal_commit_tree, CommitTree, RowIterator, }, wal::WalItem, }; diff --git a/workspace/client/Cargo.toml b/workspace/client/Cargo.toml index 99689194d0..270446ff5d 100644 --- a/workspace/client/Cargo.toml +++ b/workspace/client/Cargo.toml @@ -8,6 +8,8 @@ default-run = "sos-client" thiserror = "1" sos-core = { path = "../core" } sos-readline = { path = "../readline" } +async-trait = "0.1" +async-recursion = "1" clap = { version = "3", features = ["derive", "wrap_help"] } tokio = { version = "1", features = ["full"] } url = "2" diff --git a/workspace/client/src/bin/main.rs b/workspace/client/src/bin/main.rs index 752f0cc58f..76440898f2 100644 --- a/workspace/client/src/bin/main.rs +++ b/workspace/client/src/bin/main.rs @@ -7,12 +7,11 @@ use std::{ use clap::{Parser, Subcommand}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use url::Url; -use uuid::Uuid; use sos_client::{ - create_vault, exec, monitor, signup, Cache, ClientBuilder, Error, Result, + exec, monitor, run_blocking, signup, Cache, ClientBuilder, ClientCache, + Error, Result, }; -use sos_core::Algorithm; use sos_readline::read_shell; use terminal_banner::{Banner, Padding}; @@ -62,31 +61,6 @@ enum Command { #[clap(short, long)] keystore: PathBuf, }, - /// Create a new secret storage vault. - /// - /// A passphrase for the new vault will be read from - /// stdin if data is detected on stdin otherwise a - /// random diceware passphrase is generated and printed - /// to the terminal. - /// - /// The filename will be the UUID for the new vault. - Create { - /// Unique identifier for the vault. - #[clap(short, long)] - uuid: Option, - - /// Public name for the vault. - #[clap(short, long)] - name: Option, - - /// Encryption algorithm - #[clap(short, long)] - algorithm: Option, - - /// Directory to write the vault file - #[clap(parse(from_os_str))] - destination: PathBuf, - }, } /// Ensure a supplied URL is https. 
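The standalone `Create` subcommand is removed because vault creation now goes through the cache layer via `ClientCache::create_vault`, which also registers the new vault with the remote server. A minimal sketch of the replacement flow, mirroring the `ShellCommand::Create` handler added later in this diff (`cache` is assumed to be the `Arc<RwLock<Cache>>` built in `run()`):

```rust
// Illustrative fragment only: mirrors the ShellCommand::Create handler in
// workspace/client/src/shell/mod.rs further down in this diff.
let mut writer = cache.write().unwrap();
// Generates a passphrase, posts the new WAL to the server and records the
// vault summary locally.
let passphrase = run_blocking(writer.create_vault(String::from("Example")))?;
display_passphrase("ENCRYPTION PASSPHRASE", &passphrase);
```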
@@ -121,14 +95,6 @@ fn run() -> Result<()> { ensure_https(&server)?; monitor(server, keystore)?; } - Command::Create { - destination, - name, - uuid, - algorithm, - } => { - create_vault(destination, name, uuid, algorithm)?; - } Command::Signup { server, keystore, @@ -141,12 +107,19 @@ fn run() -> Result<()> { ensure_https(&server)?; let cache_dir = Cache::cache_dir()?; let client = ClientBuilder::new(server, keystore).build()?; - let cache = Arc::new(RwLock::new(Cache::new(client, cache_dir)?)); + let cache = + Arc::new(RwLock::new(Cache::new(client, cache_dir, true)?)); let reader = cache.read().unwrap(); - welcome(reader.client().server())?; + welcome(reader.server())?; drop(reader); + let mut writer = cache.write().unwrap(); + if let Err(e) = run_blocking(writer.load_vaults()) { + tracing::error!("failed to load vaults: {}", e); + } + drop(writer); + let prompt_cache = Arc::clone(&cache); let prompt = || -> String { let cache = prompt_cache.read().unwrap(); @@ -184,6 +157,7 @@ fn main() -> Result<()> { Ok(_) => {} Err(e) => { tracing::error!("{}", e); + //panic!("{}", e); } } Ok(()) diff --git a/workspace/client/src/cache.rs b/workspace/client/src/cache.rs index 296ce6e06a..a6fb578b2b 100644 --- a/workspace/client/src/cache.rs +++ b/workspace/client/src/cache.rs @@ -1,22 +1,37 @@ //! Cache of local WAL files. -use crate::{client::Client, Error, Result}; +use crate::{ + client::{decode_match_proof, Client}, + Conflict, Error, Result, +}; +use async_recursion::async_recursion; +use async_trait::async_trait; use reqwest::{Response, StatusCode}; use sos_core::{ - commit_tree::CommitProof, - events::{Patch, SyncEvent, WalEvent}, - file_identity::{FileIdentity, WAL_IDENTITY}, - gatekeeper::Gatekeeper, + address::AddressStr, + commit_tree::{wal_commit_tree, CommitProof, CommitTree}, + constants::{VAULT_BACKUP_EXT, WAL_DELETED_EXT, WAL_IDENTITY}, + encode, + events::{PatchFile, SyncEvent, WalEvent}, + generate_passphrase, secret::SecretRef, - vault::{CommitHash, Header, Summary, Vault}, - wal::{file::WalFile, reducer::WalReducer, WalProvider}, + vault::{Summary, Vault}, + wal::{ + file::{WalFile, WalFileRecord}, + reducer::WalReducer, + snapshot::{SnapShot, SnapShotManager}, + WalProvider, + }, + CommitHash, FileIdentity, Gatekeeper, VaultFileAccess, }; use std::{ borrow::Cow, collections::HashMap, - fs::OpenOptions, + fs::{File, OpenOptions}, io::Write, path::{Path, PathBuf}, }; +use tempfile::NamedTempFile; +use url::Url; use uuid::Uuid; fn assert_proofs_eq( @@ -32,6 +47,105 @@ fn assert_proofs_eq( } } +/// Trait for types that cache vaults locally; support a *current* view +/// into a selected vault and allow making changes to the currently +/// selected vault. +#[async_trait] +pub trait ClientCache { + /// Get the server URL. + fn server(&self) -> &Url; + + /// Get the address of the current user. + fn address(&self) -> Result; + + /// Get the vault summaries for this cache. + fn vaults(&self) -> &[Summary]; + + /// Get the snapshot manager for this cache. + fn snapshots(&self) -> &SnapShotManager; + + /// Take a snapshot of the WAL for the given vault. + fn take_snapshot(&self, summary: &Summary) -> Result<(SnapShot, bool)>; + + /// Get the history for a WAL file. + fn history( + &self, + summary: &Summary, + ) -> Result)>>; + + /// Compact a WAL file. + async fn compact(&mut self, summary: &Summary) -> Result<(u64, u64)>; + + /// Load the vault summaries from the remote server. + async fn load_vaults(&mut self) -> Result<&[Summary]>; + + /// Attempt to find a summary in this cache. 
+ fn find_vault(&self, vault: &SecretRef) -> Option<&Summary>; + + /// Create a new account and default login vault. + async fn create_account( + &mut self, + name: Option, + ) -> Result; + + /// Create a new vault. + async fn create_vault(&mut self, name: String) -> Result; + + /// Remove a vault. + async fn remove_vault(&mut self, summary: &Summary) -> Result<()>; + + /// Attempt to set the vault name on the remote server. + async fn set_vault_name( + &mut self, + summary: &Summary, + name: &str, + ) -> Result<()>; + + /// Get a comparison between a local WAL and remote WAL. + async fn vault_status( + &self, + summary: &Summary, + ) -> Result<(CommitProof, CommitProof)>; + + /// Apply changes to a vault. + async fn patch_vault( + &mut self, + summary: &Summary, + events: Vec>, + ) -> Result<()>; + + /// Load a vault, unlock it and set it as the current vault. + async fn open_vault( + &mut self, + summary: &Summary, + password: &str, + ) -> Result<()>; + + /// Get the current in-memory vault access. + fn current(&self) -> Option<&Gatekeeper>; + + /// Get a mutable reference to the current in-memory vault access. + fn current_mut(&mut self) -> Option<&mut Gatekeeper>; + + /// Close the currently open vault. + /// + /// When a vault is open it is locked before being closed. + /// + /// If no vault is open this is a noop. + fn close_vault(&mut self); + + /// Get a reference to the commit tree for a WAL file. + fn wal_tree(&self, summary: &Summary) -> Option<&CommitTree>; + + /// Force a pull of the remote WAL regardless of the state of + /// the local WAL. + async fn force_pull(&mut self, summary: &Summary) -> Result<()>; + + /// Force a push of the local WAL regardless of the state of + /// the remote WAL. + async fn force_push(&mut self, summary: &Summary) -> Result<()>; +} + /// Implements client-side caching of WAL files. pub struct Cache { /// Vaults managed by this cache. @@ -43,12 +157,344 @@ pub struct Cache { /// Directory for the user cache. user_dir: PathBuf, /// Data for the cache. - cache: HashMap, + cache: HashMap, + /// Mirror WAL files and in-memory contents to vault files + mirror: bool, + /// Snapshots of the WAL files. + snapshots: SnapShotManager, +} + +#[async_trait] +impl ClientCache for Cache { + fn server(&self) -> &Url { + self.client.server() + } + + fn address(&self) -> Result { + self.client.address() + } + + fn vaults(&self) -> &[Summary] { + self.summaries.as_slice() + } + + fn snapshots(&self) -> &SnapShotManager { + &self.snapshots + } + + fn take_snapshot(&self, summary: &Summary) -> Result<(SnapShot, bool)> { + let (wal, _) = self + .cache + .get(summary.id()) + .ok_or(Error::CacheNotAvailable(summary.id().clone()))?; + let root_hash = wal.tree().root().ok_or(Error::NoRootCommit)?; + Ok(self.snapshots.create(summary.id(), wal.path(), root_hash)?) + } + + fn history( + &self, + summary: &Summary, + ) -> Result)>> { + let (wal, _) = self + .cache + .get(summary.id()) + .ok_or(Error::CacheNotAvailable(summary.id().clone()))?; + let mut records = Vec::new(); + for record in wal.iter()? 
{ + let record = record?; + let event = wal.event_data(&record)?; + records.push((record, event)); + } + Ok(records) + } + + async fn compact(&mut self, summary: &Summary) -> Result<(u64, u64)> { + let (wal, _) = self + .cache + .get_mut(summary.id()) + .ok_or(Error::CacheNotAvailable(summary.id().clone()))?; + + let old_size = wal.path().metadata()?.len(); + + // Get the reduced set of events + let events = WalReducer::new().reduce(wal)?.compact()?; + let temp = NamedTempFile::new()?; + + // Apply them to a temporary WAL file + let mut temp_wal = WalFile::new(temp.path())?; + temp_wal.apply(events, None)?; + + let new_size = temp_wal.path().metadata()?.len(); + + // Remove the existing WAL file + std::fs::remove_file(wal.path())?; + // Move the temp file into place + std::fs::rename(temp.path(), wal.path())?; + + // Need to recreate the WAL file and load the updated + // commit tree + *wal = WalFile::new(wal.path())?; + wal.load_tree()?; + + // Verify the new WAL tree + wal_commit_tree(wal.path(), true, |_| {})?; + + self.force_push(summary).await?; + + // Refresh in-memory vault and mirrored copy + self.refresh_vault(summary)?; + + Ok((old_size, new_size)) + } + + async fn load_vaults(&mut self) -> Result<&[Summary]> { + let summaries = self.client.list_vaults().await?; + self.load_caches(&summaries)?; + self.summaries = summaries; + Ok(self.vaults()) + } + + fn find_vault(&self, vault: &SecretRef) -> Option<&Summary> { + match vault { + SecretRef::Name(name) => { + self.summaries.iter().find(|s| s.name() == name) + } + SecretRef::Id(id) => self.summaries.iter().find(|s| s.id() == id), + } + } + + async fn create_account( + &mut self, + name: Option, + ) -> Result { + self.create(name, true).await + } + + async fn create_vault(&mut self, name: String) -> Result { + self.create(Some(name), false).await + } + + async fn remove_vault(&mut self, summary: &Summary) -> Result<()> { + let current_id = self.current().map(|c| c.id().clone()); + + // Attempt to delete on the remote server + let (response, _) = self.client.delete_wal(summary.id()).await?; + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + + // If the deleted vault is the currently selected + // vault we must close it + if let Some(id) = ¤t_id { + if id == summary.id() { + self.close_vault(); + } + } + + // Remove local vault mirror if it exists + let vault_path = self.vault_path(summary); + if vault_path.exists() { + std::fs::remove_file(vault_path)?; + } + + // Rename the local WAL file so recovery is still possible + let wal_path = self.vault_path(summary); + if wal_path.exists() { + let mut wal_path_backup = wal_path.clone(); + wal_path_backup.set_extension(WAL_DELETED_EXT); + std::fs::rename(wal_path, wal_path_backup)?; + } + + // Remove from our cache of managed vaults + self.cache.remove(summary.id()); + let index = + self.summaries.iter().position(|s| s.id() == summary.id()); + if let Some(index) = index { + self.summaries.remove(index); + } + + Ok(()) + } + + async fn set_vault_name( + &mut self, + summary: &Summary, + name: &str, + ) -> Result<()> { + let event = SyncEvent::SetVaultName(Cow::Borrowed(name)); + let response = self.patch_wal(summary, vec![event]).await?; + if response.status().is_success() { + for item in self.summaries.iter_mut() { + if item.id() == summary.id() { + item.set_name(name.to_string()); + } + } + Ok(()) + } else { + Err(Error::ResponseCode(response.status().into())) + } + } + + async fn vault_status( + &self, + summary: &Summary, + 
) -> Result<(CommitProof, CommitProof)> { + if let Some((wal, _)) = self.cache.get(summary.id()) { + let client_proof = wal.tree().head()?; + let (_response, server_proof) = + self.client.head_wal(summary.id()).await?; + Ok((client_proof, server_proof)) + } else { + Err(Error::CacheNotAvailable(*summary.id())) + } + } + + async fn open_vault( + &mut self, + summary: &Summary, + password: &str, + ) -> Result<()> { + let vault = self.get_wal_vault(summary).await?; + let mut keeper = if self.mirror { + let vault_path = self.vault_path(summary); + if !vault_path.exists() { + let buffer = encode(&vault)?; + let mut file = File::create(&vault_path)?; + file.write_all(&buffer)?; + } + + let mirror = Box::new(VaultFileAccess::new(vault_path)?); + Gatekeeper::new_mirror(vault, mirror) + } else { + Gatekeeper::new(vault) + }; + keeper + .unlock(password) + .map_err(|_| Error::VaultUnlockFail)?; + self.current = Some(keeper); + Ok(()) + } + + fn current(&self) -> Option<&Gatekeeper> { + self.current.as_ref() + } + + fn current_mut(&mut self) -> Option<&mut Gatekeeper> { + self.current.as_mut() + } + + async fn patch_vault( + &mut self, + summary: &Summary, + events: Vec>, + ) -> Result<()> { + let response = self.patch_wal(summary, events).await?; + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + Ok(()) + } + + fn close_vault(&mut self) { + if let Some(current) = self.current_mut() { + current.lock(); + } + self.current = None; + } + + fn wal_tree(&self, summary: &Summary) -> Option<&CommitTree> { + self.cache.get(summary.id()).map(|(wal, _)| wal.tree()) + } + + async fn force_pull(&mut self, summary: &Summary) -> Result<()> { + // Move our cached vault to a backup + let vault_path = self.vault_path(summary); + if vault_path.exists() { + let mut vault_backup = vault_path.clone(); + vault_backup.set_extension(VAULT_BACKUP_EXT); + std::fs::rename(&vault_path, &vault_backup)?; + tracing::debug!( + vault = ?vault_path, backup = ?vault_backup, "vault backup"); + } + + let (wal, _) = self + .cache + .get_mut(summary.id()) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; + + // Create a snapshot of the WAL before deleting it + let root_hash = wal.tree().root().ok_or(Error::NoRootCommit)?; + let (snapshot, _) = + self.snapshots.create(summary.id(), wal.path(), root_hash)?; + tracing::debug!( + path = ?snapshot.0, "force_pull snapshot"); + + // Remove the existing WAL file + std::fs::remove_file(wal.path())?; + + // Need to recreate the WAL file correctly before pulling + // as pull_wal() expects the file to exist + *wal = WalFile::new(wal.path())?; + wal.load_tree()?; + + // Pull the remote WAL + self.pull_wal(summary).await?; + + let (_, patch_file) = self + .cache + .get(summary.id()) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; + + tracing::debug!("force_pull reading from patch file"); + + let patch = patch_file.read()?; + let events = patch.0; + + tracing::debug!(events = events.len(), "force_pull"); + + // Got some events which haven't been saved so try + // to apply them over the top of the new WAL + if !events.is_empty() { + self.patch_vault(summary, events).await?; + self.refresh_vault(summary)?; + } + + Ok(()) + } + + async fn force_push(&mut self, summary: &Summary) -> Result<()> { + let (wal, _) = self + .cache + .get(summary.id()) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; + let proof = wal.tree().head()?; + let body = std::fs::read(wal.path())?; + let (response, _) = + self.client.post_wal(summary.id(), &proof, 
body).await?; + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + Ok(()) + } } impl Cache { - /// Create a new cache using the given client and root directory. - pub fn new>(client: Client, cache_dir: D) -> Result { + /// Create a new cache using the given client and cache directory. + /// + /// If the `mirror` option is given then the cache will mirror WAL files + /// and in-memory content to disc as vault files providing an extra level + /// if redundancy in case of failure. + pub fn new>( + client: Client, + cache_dir: D, + mirror: bool, + ) -> Result { let cache_dir = cache_dir.as_ref().to_path_buf(); if !cache_dir.is_dir() { return Err(Error::NotDirectory(cache_dir)); @@ -59,310 +505,383 @@ impl Cache { let user_dir = cache_dir.join(&address); std::fs::create_dir_all(&user_dir)?; + let snapshots = SnapShotManager::new(&user_dir)?; + Ok(Self { summaries: Default::default(), current: None, client, user_dir, cache: Default::default(), + mirror, + snapshots, }) } - /// Get the vault summaries for this cache. - pub fn summaries(&self) -> &[Summary] { - self.summaries.as_slice() + /// Get the default root directory used for caching client data. + /// + /// If the `CACHE_DIR` environment variable is set it is used + /// instead of the default location. + pub fn cache_dir() -> Result { + let cache_dir = if let Ok(env_cache_dir) = std::env::var("CACHE_DIR") + { + let cache_dir = PathBuf::from(env_cache_dir); + if !cache_dir.is_dir() { + return Err(Error::NotDirectory(cache_dir)); + } + cache_dir + } else { + let data_local_dir = + dirs::data_local_dir().ok_or(Error::NoDataLocalDir)?; + let cache_dir = data_local_dir.join("sos"); + if !cache_dir.exists() { + std::fs::create_dir(&cache_dir)?; + } + cache_dir + }; + tracing::debug!(cache_dir = ?cache_dir, "cache_dir"); + Ok(cache_dir) } - /// Get the client for server communication. - pub fn client(&self) -> &Client { - &self.client - } + /// Create a new account or vault. + async fn create( + &mut self, + name: Option, + is_account: bool, + ) -> Result { + let (passphrase, vault, buffer) = self.new_vault(name)?; + let summary = vault.summary().clone(); + + if self.mirror { + let vault_path = self.vault_path(&summary); + let mut file = File::create(vault_path)?; + file.write_all(&buffer)?; + } - /// Get the current in-memory vault access. - pub fn current(&self) -> Option<&Gatekeeper> { - self.current.as_ref() - } + let response = if is_account { + self.client.create_account(buffer).await? + } else { + let (response, _) = self.client.create_wal(buffer).await?; + response + }; - /// Get a mutable reference to the current in-memory vault access. - pub fn current_mut(&mut self) -> Option<&mut Gatekeeper> { - self.current.as_mut() - } + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + self.summaries.push(summary); - /// Set the currently active in-memory vault. - pub fn set_current(&mut self, current: Option) { - self.current = current; + Ok(passphrase) } - /// Attempt to find a summary in this cache. 
- pub fn find_summary(&self, vault: &SecretRef) -> Option<&Summary> { - match vault { - SecretRef::Name(name) => { - self.summaries.iter().find(|s| s.name() == name) - } - SecretRef::Id(id) => self.summaries.iter().find(|s| s.id() == id), + fn new_vault( + &self, + name: Option, + ) -> Result<(String, Vault, Vec)> { + let (passphrase, _) = generate_passphrase()?; + let mut vault: Vault = Default::default(); + if let Some(name) = name { + vault.set_name(name); } - } - - /// Load the vault summaries from the remote server. - pub async fn load_summaries(&mut self) -> Result<&[Summary]> { - let summaries = self.client.list_vaults().await?; - self.load_caches(&summaries)?; - self.summaries = summaries; - Ok(self.summaries()) + vault.initialize(&passphrase)?; + let buffer = encode(&vault)?; + Ok((passphrase, vault, buffer)) } fn load_caches(&mut self, summaries: &[Summary]) -> Result<()> { for summary in summaries { - let cached_wal_path = self.wal_path(summary); - if cached_wal_path.exists() { - let mut wal_file = WalFile::new(&cached_wal_path)?; - wal_file.load_tree()?; - self.cache - .insert(*summary.id(), (cached_wal_path, wal_file)); - } + let patch_path = self.patch_path(summary); + let patch_file = PatchFile::new(patch_path)?; + + let wal_path = self.wal_path(summary); + let mut wal_file = WalFile::new(&wal_path)?; + wal_file.load_tree()?; + self.cache.insert(*summary.id(), (wal_file, patch_file)); } Ok(()) } - fn wal_path(&self, summary: &Summary) -> PathBuf { + pub fn wal_path(&self, summary: &Summary) -> PathBuf { let wal_name = format!("{}.{}", summary.id(), WalFile::extension()); self.user_dir.join(&wal_name) } - /// Load a vault by attempting to fetch the WAL file and caching - /// the result on disc. - pub async fn load_vault(&mut self, summary: &Summary) -> Result { + pub fn vault_path(&self, summary: &Summary) -> PathBuf { + let wal_name = format!("{}.{}", summary.id(), Vault::extension()); + self.user_dir.join(&wal_name) + } + + pub fn patch_path(&self, summary: &Summary) -> PathBuf { + let patch_name = + format!("{}.{}", summary.id(), PatchFile::extension()); + self.user_dir.join(&patch_name) + } + + /// Fetch the remote WAL file. 
+ async fn pull_wal(&mut self, summary: &Summary) -> Result<()> { let cached_wal_path = self.wal_path(summary); + let wal = self + .cache + .get_mut(summary.id()) + .map(|(w, _)| w) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; - // Cache already exists so attempt to get a diff of records - // to append - let cached = if cached_wal_path.exists() { - let mut wal_file = WalFile::new(&cached_wal_path)?; - wal_file.load_tree()?; - let proof = wal_file.tree().head()?; - (wal_file, Some(proof)) - // Otherwise prepare a new WAL cache + let client_proof = if let Some(_) = wal.tree().root() { + let proof = wal.tree().head()?; + tracing::debug!(root = %proof.root_hex(), "pull_wal wants diff"); + Some(proof) } else { - let wal_file = WalFile::new(&cached_wal_path)?; - (wal_file, None) + None }; - let (response, server_proof) = - self.client.get_wal(summary.id(), cached.1.as_ref()).await?; + let (response, server_proof) = self + .client + .get_wal(summary.id(), client_proof.as_ref()) + .await?; let status = response.status(); + tracing::debug!(status = %status, "pull_wal"); + match status { StatusCode::OK => { - if let Some(server_proof) = server_proof { - let (client_proof, wal_file) = match cached { - // If we sent a proof to the server then we - // are expecting a diff of records - (mut wal_file, Some(_proof)) => { - let buffer = response.bytes().await?; - - // Check the identity looks good - FileIdentity::read_slice(&buffer, &WAL_IDENTITY)?; - - // Get buffer of log records after the identity - let record_bytes = - &buffer[WAL_IDENTITY.len() - 1..buffer.len()]; - - // Append the diff bytes without the identity - let mut file = OpenOptions::new() - .write(true) - .append(true) - .open(&cached_wal_path)?; - file.write_all(record_bytes)?; - wal_file.load_tree()?; - - (wal_file.tree().head()?, wal_file) - } - // Otherwise the server should send us the entire - // WAL file - (mut wal_file, None) => { - // Read in the entire response buffer - let buffer = response.bytes().await?; + let server_proof = server_proof.ok_or(Error::ServerProof)?; + tracing::debug!( + server_root_hash = %server_proof.root_hex(), "pull_wal"); - // Check the identity looks good - FileIdentity::read_slice(&buffer, &WAL_IDENTITY)?; + let client_proof = match client_proof { + // If we sent a proof to the server then we + // are expecting a diff of records + Some(_proof) => { + let buffer = response.bytes().await?; - std::fs::write(&cached_wal_path, &buffer)?; - wal_file.load_tree()?; + tracing::debug!(bytes = ?buffer.len(), + "pull_wal write diff WAL records"); - (wal_file.tree().head()?, wal_file) - } - }; + // Check the identity looks good + FileIdentity::read_slice(&buffer, &WAL_IDENTITY)?; - assert_proofs_eq(client_proof, server_proof)?; + // Get buffer of log records after the identity bytes + let record_bytes = &buffer[WAL_IDENTITY.len()..]; - self.cache - .insert(*summary.id(), (cached_wal_path, wal_file)); + debug_assert!(record_bytes.len() == buffer.len() - 4); - // Build the vault from the WAL file - let (_, wal) = self.cache.get_mut(summary.id()).unwrap(); - let vault = WalReducer::new().reduce(wal)?.build()?; + // Append the diff bytes without the identity + let mut file = OpenOptions::new() + .write(true) + .append(true) + .open(&cached_wal_path)?; + file.write_all(record_bytes)?; - Ok(vault) - } else { - Err(Error::ServerProof) - } - } - StatusCode::NOT_MODIFIED => { - // Build the vault from the cached WAL file - if let Some((_, wal)) = self.cache.get_mut(summary.id()) { - if let Some(server_proof) = 
server_proof { - let client_proof = wal.tree().head()?; - assert_proofs_eq(client_proof, server_proof)?; - let vault = WalReducer::new().reduce(wal)?.build()?; - Ok(vault) - } else { - Err(Error::ServerProof) + // Update with the new commit tree + wal.load_tree()?; + + wal.tree().head()? } - } else { - Err(Error::CacheNotAvailable(*summary.id())) - } - } - StatusCode::CONFLICT => { - todo!("handle conflicts"); - } - _ => { - todo!("handle errors {:#?}", response); - } - } - } + // Otherwise the server should send us the entire + // WAL file + None => { + // Read in the entire response buffer + let buffer = response.bytes().await?; - /// Attempt to patch a remote WAL file. - pub async fn patch_vault( - &mut self, - summary: &Summary, - events: Vec>, - ) -> Result { - if let Some((_, wal)) = self.cache.get_mut(summary.id()) { - let patch = Patch(events); - let proof = wal.tree().head()?; - let (response, server_proof) = - self.client.patch_wal(summary.id(), &proof, &patch).await?; + tracing::debug!(bytes = ?buffer.len(), + "pull_wal write entire WAL"); - let status = response.status(); - match status { - StatusCode::OK => { - let server_proof = - server_proof.ok_or(Error::ServerProof)?; - let change_set = patch.0; - - // Apply changes to the local WAL file - let mut changes = Vec::new(); - for event in change_set { - if let Ok::, sos_core::Error>( - wal_event, - ) = event.try_into() - { - changes.push(wal_event); - } + // Check the identity looks good + FileIdentity::read_slice(&buffer, &WAL_IDENTITY)?; + + std::fs::write(&cached_wal_path, &buffer)?; + wal.load_tree()?; + + wal.tree().head()? } + }; - // Pass the expected root hash so changes are reverted - // if the root hashes do not match - wal.apply(changes, Some(CommitHash(server_proof.0)))?; + assert_proofs_eq(client_proof, server_proof)?; + Ok(()) + } + StatusCode::NOT_MODIFIED => { + // Verify that both proofs are equal + if let Some((wal, _)) = self.cache.get(summary.id()) { + let server_proof = + server_proof.ok_or(Error::ServerProof)?; let client_proof = wal.tree().head()?; assert_proofs_eq(client_proof, server_proof)?; - Ok(response) - } - StatusCode::CONFLICT => { - todo!("handle patch conflict"); - } - _ => { - todo!("handle patch errors"); + Ok(()) + } else { + Err(Error::CacheNotAvailable(*summary.id())) } } - } else { - Err(Error::CacheNotAvailable(*summary.id())) - } - } - - /// Attempt to set the vault name on the remote server. - pub async fn set_vault_name( - &mut self, - summary: &Summary, - name: &str, - ) -> Result { - let event = SyncEvent::SetVaultName(Cow::Borrowed(name)); - let response = self.patch_vault(summary, vec![event]).await?; - - if response.status().is_success() { - for item in self.summaries.iter_mut() { - if item.id() == summary.id() { - item.set_name(name.to_string()); + StatusCode::CONFLICT => { + // If we are expecting a diff but got a conflict + // from the server then the trees have diverged. + // + // We should pull from the server a complete fresh + // tree at this point so we can get back in sync + // however we need confirmation that this is allowed + // from the user. + if let Some(client_proof) = client_proof { + let server_proof = + server_proof.ok_or(Error::ServerProof)?; + Err(Error::Conflict(Conflict { + summary: summary.clone(), + local: client_proof.reduce(), + remote: server_proof.reduce(), + })) + } else { + Err(Error::ResponseCode(response.status().into())) } } + _ => Err(Error::ResponseCode(response.status().into())), } - - Ok(response) } - /// Create a new WAL file. 
- pub async fn create_wal(&mut self, vault: Vec) -> Result { - let summary = Header::read_summary_slice(&vault)?; - let (response, _) = self.client.create_wal(vault).await?; - if response.status().is_success() { - self.summaries.push(summary); - } - Ok(response) + /// Load a vault by attempting to fetch the WAL file and caching + /// the result on disc then building an in-memory vault from the WAL. + async fn get_wal_vault(&mut self, summary: &Summary) -> Result { + // Fetch latest version of the WAL content + self.pull_wal(summary).await?; + + // Reduce the WAL to a vault + let wal = self + .cache + .get_mut(summary.id()) + .map(|(w, _)| w) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; + let vault = WalReducer::new().reduce(wal)?.build()?; + Ok(vault) } - /// Delete an existing WAL file. - pub async fn delete_wal( + /// Attempt to patch a remote WAL file. + #[async_recursion] + async fn patch_wal( &mut self, summary: &Summary, + events: Vec>, ) -> Result { - let current_id = self.current().map(|c| c.id().clone()); + let (wal, patch_file) = self + .cache + .get_mut(summary.id()) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; - let (response, _) = self.client.delete_wal(summary.id()).await?; + let patch = patch_file.append(events)?; - if response.status().is_success() { - // If the deleted vault is the currently selected - // vault we must clear the selection - if let Some(id) = ¤t_id { - if id == summary.id() { - self.set_current(None); + let client_proof = wal.tree().head()?; + + let (response, server_proof) = self + .client + .patch_wal(summary.id(), &client_proof, &patch) + .await?; + + let status = response.status(); + match status { + StatusCode::OK => { + let server_proof = server_proof.ok_or(Error::ServerProof)?; + + // Apply changes to the local WAL file + let mut changes = Vec::new(); + for event in patch.0 { + if let Ok::, sos_core::Error>(wal_event) = + event.try_into() + { + changes.push(wal_event); + } } - } - let index = - self.summaries.iter().position(|s| s.id() == summary.id()); - if let Some(index) = index { - self.summaries.remove(index); - } - } + // Pass the expected root hash so changes are reverted + // if the root hashes do not match + wal.apply(changes, Some(CommitHash(server_proof.0)))?; - Ok(response) - } + patch_file.truncate()?; - /// Get a comparison between a local WAL and remote WAL. - pub async fn head_wal( - &self, - summary: &Summary, - ) -> Result<(CommitProof, CommitProof)> { - if let Some((_, wal)) = self.cache.get(summary.id()) { - let client_proof = wal.tree().head()?; - let (_response, server_proof) = - self.client.head_wal(summary.id()).await?; - Ok((client_proof, server_proof)) - } else { - Err(Error::CacheNotAvailable(*summary.id())) + let client_proof = wal.tree().head()?; + assert_proofs_eq(client_proof, server_proof)?; + Ok(response) + } + StatusCode::CONFLICT => { + let server_proof = server_proof.ok_or(Error::ServerProof)?; + + // Server replied with a proof that they have a + // leaf node corresponding to our root hash which + // indicates that we are behind the remote so we + // can try to pull again and try to patch afterwards + if let Some(_) = decode_match_proof(response.headers())? 
{ + tracing::debug!( + client_root = %client_proof.root_hex(), + server_root = %server_proof.root_hex(), + "conflict on patch, attempting sync"); + + // Pull the WAL from the server that we + // are behind + self.pull_wal(summary).await?; + + tracing::debug!(vault_id = %summary.id(), + "conflict on patch, pulled remote WAL"); + + // Retry sending our local changes to + // the remote WAL + let response = + self.patch_wal(summary, patch.0.clone()).await?; + + tracing::debug!(status = %response.status(), + "conflict on patch, retry patch status"); + + if response.status().is_success() { + // If the retry was successful then + // we should update the in-memory vault + // so if reflects the pulled changes + // with our patch applied over the top + let updated_vault = + self.get_wal_vault(summary).await?; + + if let Some(keeper) = self.current_mut() { + if keeper.id() == summary.id() { + let existing_vault = keeper.vault_mut(); + *existing_vault = updated_vault; + } + } + } + Ok(response) + } else { + Err(Error::Conflict(Conflict { + summary: summary.clone(), + local: client_proof.reduce(), + remote: server_proof.reduce(), + })) + } + } + _ => Err(Error::ResponseCode(response.status().into())), } } - /// Get the default root directory used for caching client data. - pub fn cache_dir() -> Result { - let data_local_dir = - dirs::data_local_dir().ok_or(Error::NoDataLocalDir)?; - let cache_dir = data_local_dir.join("sos"); - if !cache_dir.exists() { - std::fs::create_dir(&cache_dir)?; + // Refresh the in-memory vault of the current selection + // from the contents of the current WAL file. + fn refresh_vault(&mut self, summary: &Summary) -> Result<()> { + let wal = self + .cache + .get_mut(summary.id()) + .map(|(w, _)| w) + .ok_or(Error::CacheNotAvailable(*summary.id()))?; + let vault = WalReducer::new().reduce(wal)?.build()?; + let mirror = self.mirror; + let vault_path = self.vault_path(summary); + + if let Some(keeper) = self.current_mut() { + if keeper.id() == summary.id() { + // Rewrite the on-disc version if we are mirroring + if mirror { + let buffer = encode(&vault)?; + let mut file = File::create(&vault_path)?; + file.write_all(&buffer)?; + } + + // Update the in-memory version + let keeper_vault = keeper.vault_mut(); + *keeper_vault = vault; + } } - Ok(cache_dir) + Ok(()) } } diff --git a/workspace/client/src/client.rs b/workspace/client/src/client.rs index e508aa3bf8..0648b6d411 100644 --- a/workspace/client/src/client.rs +++ b/workspace/client/src/client.rs @@ -6,9 +6,10 @@ use reqwest::{ use reqwest_eventsource::EventSource; use sos_core::{ address::AddressStr, - commit_tree::{decode_proof, encode_proof, CommitProof}, + commit_tree::CommitProof, + constants::{X_COMMIT_PROOF, X_MATCH_PROOF, X_SIGNED_MESSAGE}, + decode, events::Patch, - headers::{X_COMMIT_HASH, X_COMMIT_PROOF, X_SIGNED_MESSAGE}, signer::Signer, vault::{encode, Summary, MIME_TYPE_VAULT}, }; @@ -25,14 +26,21 @@ const AUTHORIZATION: &str = "authorization"; const CONTENT_TYPE: &str = "content-type"; fn decode_headers_proof(headers: &HeaderMap) -> Result> { - if let (Some(commit_hash), Some(commit_proof)) = - (headers.get(X_COMMIT_HASH), headers.get(X_COMMIT_PROOF)) - { - let commit_hash = base64::decode(commit_hash)?; - let commit_proof = base64::decode(commit_proof)?; - let commit_hash: [u8; 32] = commit_hash.as_slice().try_into()?; - let commit_proof = decode_proof(&commit_proof)?; - Ok(Some(CommitProof(commit_hash, commit_proof))) + if let Some(commit_proof) = headers.get(X_COMMIT_PROOF) { + let value = 
base64::decode(commit_proof)?; + let value: CommitProof = decode(&value)?; + Ok(Some(value)) + } else { + Ok(None) + } +} + +pub(crate) fn decode_match_proof( + headers: &HeaderMap, +) -> Result>> { + if let Some(leaf_proof) = headers.get(X_MATCH_PROOF) { + let leaf_proof = base64::decode(leaf_proof)?; + Ok(Some(leaf_proof)) } else { Ok(None) } @@ -41,17 +49,12 @@ fn decode_headers_proof(headers: &HeaderMap) -> Result> { fn encode_headers_proof( mut builder: RequestBuilder, proof: &CommitProof, -) -> RequestBuilder { - builder = builder.header(X_COMMIT_HASH, base64::encode(&proof.0)); - builder = builder - .header(X_COMMIT_PROOF, base64::encode(encode_proof(&proof.1))); - builder +) -> Result { + let value = encode(proof)?; + builder = builder.header(X_COMMIT_PROOF, base64::encode(&value)); + Ok(builder) } -/// Encapsulates the information returned -/// by sending a HEAD request for a vault. -pub struct VaultInfo {} - pub struct Client { server: Url, http_client: HttpClient, @@ -99,32 +102,44 @@ impl Client { let url = self.server.join("api/auth")?; let (message, signature) = self.self_signed().await?; - let challenge: (Uuid, Challenge) = self + let response = self .http_client .get(url) .header(AUTHORIZATION, self.bearer_prefix(&signature)) .header(X_SIGNED_MESSAGE, base64::encode(&message)) .send() - .await? - .json() .await?; + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + + let challenge: (Uuid, Challenge) = response.json().await?; + let (uuid, message) = challenge; let url = format!("api/auth/{}", uuid); let url = self.server.join(&url)?; let signature = self.encode_signature(self.signer.sign(&message).await?)?; - let summaries: Vec = self + let response = self .http_client .get(url) .header(AUTHORIZATION, self.bearer_prefix(&signature)) .header(X_SIGNED_MESSAGE, base64::encode(&message)) .send() - .await? - .json() .await?; + response + .status() + .is_success() + .then_some(()) + .ok_or(Error::ResponseCode(response.status().into()))?; + + let summaries: Vec = response.json().await?; + Ok(summaries) } @@ -199,7 +214,7 @@ impl Client { .header(X_SIGNED_MESSAGE, base64::encode(&message)); if let Some(proof) = proof { - builder = encode_headers_proof(builder, proof); + builder = encode_headers_proof(builder, proof)?; } let response = builder.send().await?; @@ -227,7 +242,7 @@ impl Client { .patch(url) .header(AUTHORIZATION, self.bearer_prefix(&signature)); - builder = encode_headers_proof(builder, proof); + builder = encode_headers_proof(builder, proof)?; builder = builder.body(message); let response = builder.send().await?; @@ -236,6 +251,31 @@ impl Client { Ok((response, server_proof)) } + /// Replace a WAL file. + pub async fn post_wal( + &self, + vault_id: &Uuid, + proof: &CommitProof, + body: Vec, + ) -> Result<(Response, Option)> { + let url = self.server.join(&format!("api/vaults/{}", vault_id))?; + let signature = + self.encode_signature(self.signer.sign(&body).await?)?; + let mut builder = self + .http_client + .post(url) + .header(AUTHORIZATION, self.bearer_prefix(&signature)) + .header(CONTENT_TYPE, MIME_TYPE_VAULT) + .body(body); + + builder = encode_headers_proof(builder, proof)?; + + let response = builder.send().await?; + let headers = response.headers(); + let server_proof = decode_headers_proof(headers)?; + Ok((response, server_proof)) + } + /// Get the commit proof for a remote WAL file. 
pub async fn head_wal( &self, diff --git a/workspace/client/src/create.rs b/workspace/client/src/create.rs deleted file mode 100644 index 28242638bb..0000000000 --- a/workspace/client/src/create.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::path::PathBuf; - -use sos_core::{ - diceware, - vault::{Vault, DEFAULT_VAULT_NAME}, - Algorithm, -}; -use uuid::Uuid; - -use sos_readline::read_stdin; - -use crate::{Error, Result}; - -/// Create a new empty vault -pub fn vault( - destination: PathBuf, - name: Option, - uuid: Option, - algorithm: Option, -) -> Result<()> { - if !destination.is_dir() { - return Err(Error::NotDirectory(destination)); - } - - let uuid = uuid.unwrap_or_else(Uuid::new_v4); - let file_name = uuid.to_string(); - - let mut vault_path = destination.join(&file_name); - vault_path.set_extension(Vault::extension()); - - if vault_path.exists() { - return Err(Error::FileExists(vault_path)); - } - - let (passphrase, generated) = if let Some(passphrase) = read_stdin()? { - (passphrase, false) - } else { - let (passphrase, _) = diceware::generate()?; - (passphrase, true) - }; - - let algorithm = if let Some(algo) = algorithm { - algo - } else { - Default::default() - }; - - let name = name.unwrap_or_else(|| String::from(DEFAULT_VAULT_NAME)); - - let mut vault = Vault::new(uuid, name, algorithm); - vault.initialize(&passphrase)?; - vault.write_file(&vault_path)?; - - tracing::info!("wrote vault to {}", vault_path.display()); - - if generated { - let delimiter = "-".repeat(60); - tracing::info!("generated diceware passphrase"); - tracing::info!("{}", delimiter); - tracing::info!("{}", passphrase); - tracing::info!("{}", delimiter); - } - Ok(()) -} diff --git a/workspace/client/src/error.rs b/workspace/client/src/error.rs index e8cb5e36f0..6c0011bbbb 100644 --- a/workspace/client/src/error.rs +++ b/workspace/client/src/error.rs @@ -1,9 +1,20 @@ -use sos_core::{secret::SecretRef, vault::CommitHash}; +use sos_core::{secret::SecretRef, vault::Summary, CommitHash}; use std::path::PathBuf; use thiserror::Error; use url::Url; use uuid::Uuid; +/// Represents a conflict response that may be resolved. +/// +/// Includes the root hashes and the leaves length for +/// each commit tree. 
+#[derive(Debug)] +pub struct Conflict { + pub summary: Summary, + pub local: ([u8; 32], usize), + pub remote: ([u8; 32], usize), +} + #[derive(Debug, Error)] pub enum Error { #[error("path {0} is not a directory")] @@ -39,34 +50,13 @@ pub enum Error { #[error(r#"secret "{0}" not found"#)] SecretNotAvailable(SecretRef), - #[error("failed to create vault, got status code {0}")] - VaultCreate(u16), - - #[error("failed to delete vault, got status code {0}")] - VaultRemove(u16), - - #[error("failed to set vault name, got status code {0}")] - SetVaultName(u16), - - #[error("failed to add secret, got status code {0}")] - AddSecret(u16), - - #[error("failed to read secret, got status code {0}")] - ReadSecret(u16), - - #[error("failed to set secret, got status code {0}")] - SetSecret(u16), - - #[error("failed to delete secret, got status code {0}")] - DelSecret(u16), - - #[error("failed to rename secret, got status code {0}")] - MvSecret(u16), + #[error("unexpected response status code {0}")] + ResponseCode(u16), #[error("editor command did not exit successfully, status {0}")] EditorExit(i32), - #[error("client and server root hashes do not match; client = {0}, server = {1}")] + #[error("local and remote root hashes do not match; local = {0}, remote = {1}; you may need to pull or push to sync changes")] RootHashMismatch(CommitHash, CommitHash), #[error("server failed to send the expected commit proof headers")] @@ -75,6 +65,13 @@ pub enum Error { #[error("cache not available for {0}")] CacheNotAvailable(Uuid), + #[error("conflict detected that may be resolvable")] + Conflict(Conflict), + + /// Error generated when a commit tree is expected to have a root. + #[error("commit tree does not have a root")] + NoRootCommit, + #[error(transparent)] ParseInt(#[from] std::num::ParseIntError), diff --git a/workspace/client/src/lib.rs b/workspace/client/src/lib.rs index 977e02fde1..b64ae30cc4 100644 --- a/workspace/client/src/lib.rs +++ b/workspace/client/src/lib.rs @@ -10,7 +10,6 @@ use web3_keystore::{decrypt, KeyStore}; mod cache; mod client; -mod create; mod error; mod monitor; mod shell; @@ -21,7 +20,7 @@ pub type Result = std::result::Result; /// Runs a future blocking the current thread so we can /// merge the synchronous nature of the shell prompt with the /// asynchronous API exposed by the client. -pub(crate) fn run_blocking(func: F) -> Result +pub fn run_blocking(func: F) -> Result where F: Future> + Send, R: Send, @@ -50,6 +49,10 @@ impl ClientBuilder { /// Build a client implementation wrapping a signing key. pub fn build(self) -> Result { + if !self.keystore.exists() { + return Err(Error::NotFile(self.keystore)); + } + // Decrypt the keystore and create the client. 
let mut keystore_file = File::open(&self.keystore)?; let mut keystore_bytes = Vec::new(); @@ -65,10 +68,9 @@ impl ClientBuilder { } } -pub use cache::Cache; -pub use client::{Client, VaultInfo}; -pub use create::vault as create_vault; -pub use error::Error; +pub use cache::{Cache, ClientCache}; +pub use client::Client; +pub use error::{Conflict, Error}; pub use monitor::monitor; pub use shell::exec; pub use signup::signup; diff --git a/workspace/client/src/monitor.rs b/workspace/client/src/monitor.rs index 50f9738838..3e7bf4c896 100644 --- a/workspace/client/src/monitor.rs +++ b/workspace/client/src/monitor.rs @@ -8,7 +8,7 @@ use sos_core::events::ChangeEvent; use crate::{run_blocking, Client, ClientBuilder, Result}; -async fn stream(client: Client) -> Result<()> { +async fn stream(client: &Client) -> Result<()> { let mut es = client.changes().await?; while let Some(event) = es.next().await { match event { @@ -68,7 +68,7 @@ async fn stream(client: Client) -> Result<()> { /// Start a monitor listening for events on the SSE stream. pub fn monitor(server: Url, keystore: PathBuf) -> Result<()> { let client = ClientBuilder::new(server, keystore).build()?; - if let Err(e) = run_blocking(stream(client)) { + if let Err(e) = run_blocking(stream(&client)) { tracing::error!("{}", e); std::process::exit(1); } diff --git a/workspace/client/src/shell/mod.rs b/workspace/client/src/shell/mod.rs index 389acd2582..82745ca6ca 100644 --- a/workspace/client/src/shell/mod.rs +++ b/workspace/client/src/shell/mod.rs @@ -3,7 +3,7 @@ use std::{ collections::HashMap, ffi::OsString, path::PathBuf, - sync::{Arc, RwLock}, + sync::{Arc, RwLock, RwLockWriteGuard}, }; use clap::{CommandFactory, Parser, Subcommand}; @@ -13,23 +13,30 @@ use url::Url; use human_bytes::human_bytes; use sos_core::{ - diceware::generate, - gatekeeper::Gatekeeper, + commit_tree::wal_commit_tree, secret::{Secret, SecretId, SecretMeta, SecretRef}, - vault::{ - encode, CommitHash, Vault, VaultAccess, VaultCommit, VaultEntry, - }, + vault::{Vault, VaultAccess, VaultCommit, VaultEntry}, + wal::WalItem, + CommitHash, }; use sos_readline::{ - read_flag, read_line, read_line_allow_empty, read_multiline, read_option, - read_password, + choose, read_flag, read_line, read_line_allow_empty, read_multiline, + read_option, read_password, Choice, }; -use crate::{display_passphrase, run_blocking, Cache, Error, Result}; +use crate::{ + display_passphrase, run_blocking, Cache, ClientCache, Error, Result, +}; mod editor; mod print; +enum ConflictChoice { + Push, + Pull, + Noop, +} + /// Secret storage shell. #[derive(Parser, Debug)] #[clap(name = "sos-shell", author, version, about, long_about = None)] @@ -43,17 +50,28 @@ enum ShellCommand { /// List vaults. Vaults, /// Create a new vault. - Create { name: String }, + Create { + /// Name for the new vault. + name: String, + }, /// Delete a vault. - Remove { vault: SecretRef }, + Remove { + /// Vault reference, it's name or identifier. + vault: SecretRef, + }, /// Select a vault. - Use { vault: SecretRef }, + Use { + /// Vault reference, it's name or identifier. + vault: SecretRef, + }, /// Print information about the selected vault. Info, /// Get or set the name of the selected vault. Name { name: Option }, /// Print commit status. Status, + /// Print commit tree leaves for the WAL of the current vault. + Tree, /// Print secret keys for the selected vault. Keys, /// List secrets for the selected vault. 
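Together with `Status`, the new `Tree` command and the `Snapshot`/`History` subcommands added just below give the shell a set of WAL inspection and maintenance commands. A rough usage sketch, assuming clap's default derive naming for these variants:

```
status                      # compare local and remote WAL commit proofs
tree                        # print commit tree leaves for the current vault
snapshot take               # take a snapshot of the current WAL state
snapshot ls --long          # list snapshots with file path and size
history list                # list WAL history events
history check               # verify the integrity of the WAL file
history compact --snapshot  # snapshot first, then compact the WAL
```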
@@ -79,6 +97,16 @@ enum ShellCommand { secret: SecretRef, label: Option, }, + /// Manage snapshots for the selected vault. + Snapshot { + #[clap(subcommand)] + cmd: SnapShot, + }, + /// Inspect the history for the selected vault. + History { + #[clap(subcommand)] + cmd: History, + }, /// Print the current identity. Whoami, /// Close the selected vault. @@ -100,6 +128,39 @@ enum Add { File { path: String, label: Option }, } +#[derive(Subcommand, Debug)] +enum SnapShot { + /// Take a snapshot of the current WAL state. + Take, + /// List snapshots. + #[clap(alias = "ls")] + List { + /// Print more information; includes file path and size. + #[clap(short, long)] + long: bool, + }, + // TODO: support removing all existing snapshots: `purge`? +} + +#[derive(Subcommand, Debug)] +enum History { + /// Compact the currently selected vault WAL file. + Compact { + /// Take a snapshot before compaction. + #[clap(short, long)] + snapshot: bool, + }, + /// Verify the integrity of the WAL file. + Check, + /// List history events. + #[clap(alias = "ls")] + List { + /// Print more information. + #[clap(short, long)] + long: bool, + }, +} + /// Attempt to read secret meta data for a reference. fn find_secret_meta( cache: Arc>, @@ -249,39 +310,86 @@ fn read_file_secret(path: &str) -> Result { Ok(Secret::File { name, mime, buffer }) } +fn maybe_conflict(cache: Arc>, func: F) -> Result<()> +where + F: FnOnce(&mut RwLockWriteGuard<'_, Cache>) -> Result<()>, +{ + let mut writer = cache.write().unwrap(); + match func(&mut writer) { + Ok(_) => Ok(()), + Err(e) => match e { + Error::Conflict(info) => { + let local_hex = hex::encode(info.local.0); + let remote_hex = hex::encode(info.remote.0); + + let local_num = info.local.1; + let remote_num = info.remote.1; + + let should_pull = remote_num >= local_num; + //let sync_title = if should_pull { "pull" } else { "push" }; + + let banner = Banner::new() + .padding(Padding::one()) + .text(Cow::Borrowed("!!! CONFLICT !!!")) + .text(Cow::Owned( + format!("A conflict was detected on {}, proceed with caution; to resolve this conflict sync with the server.", info.summary.name()), + )) + .text(Cow::Owned(format!("local = {}\nremote = {}", local_hex, remote_hex))) + .text(Cow::Owned(format!("local = #{}, remote = #{}", local_num, remote_num))) + .render(); + println!("{}", banner); + + let options = [ + Choice( + "Pull remote changes from the server", + ConflictChoice::Pull, + ), + Choice( + "Push local changes to the server", + ConflictChoice::Push, + ), + Choice("None of the above", ConflictChoice::Noop), + ]; + + let prompt = + Some("Choose an action to resolve the conflict: "); + match choose(prompt, &options)? { + Some(choice) => match choice { + ConflictChoice::Pull => { + run_blocking(writer.force_pull(&info.summary)) + } + ConflictChoice::Push => { + run_blocking(writer.force_push(&info.summary)) + } + ConflictChoice::Noop => Ok(()), + }, + None => Ok(()), + } + } + _ => Err(e), + }, + } +} + /// Execute the program command. 
fn exec_program(program: Shell, cache: Arc>) -> Result<()> { match program.cmd { ShellCommand::Vaults => { let mut writer = cache.write().unwrap(); - let summaries = run_blocking(writer.load_summaries())?; + let summaries = run_blocking(writer.load_vaults())?; print::summaries_list(summaries); Ok(()) } ShellCommand::Create { name } => { let mut writer = cache.write().unwrap(); - let (passphrase, _) = generate()?; - let mut vault: Vault = Default::default(); - vault.set_name(name); - vault.initialize(&passphrase)?; - let buffer = encode(&vault)?; - - let response = run_blocking(writer.create_wal(buffer))?; - - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::VaultCreate(response.status().into()))?; - + let passphrase = run_blocking(writer.create_vault(name))?; display_passphrase("ENCRYPTION PASSPHRASE", &passphrase); - Ok(()) } ShellCommand::Remove { vault } => { let reader = cache.read().unwrap(); let summary = reader - .find_summary(&vault) + .find_vault(&vault) .ok_or(Error::VaultNotAvailable(vault.clone()))? .clone(); let prompt = format!( @@ -293,35 +401,23 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { if read_flag(Some(&prompt))? { let mut writer = cache.write().unwrap(); - let response = run_blocking(writer.delete_wal(&summary))?; - - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::VaultRemove(response.status().into()))?; + run_blocking(writer.remove_vault(&summary))?; } Ok(()) } ShellCommand::Use { vault } => { let reader = cache.read().unwrap(); - let summary = reader.find_summary(&vault).cloned(); + let summary = reader + .find_vault(&vault) + .cloned() + .ok_or(Error::VaultNotAvailable(vault))?; drop(reader); - if let Some(summary) = &summary { - let mut writer = cache.write().unwrap(); - let vault = run_blocking(writer.load_vault(summary))?; - let mut keeper = Gatekeeper::new(vault); - let password = read_password(Some("Passphrase: "))?; - if let Ok(_) = keeper.unlock(&password) { - writer.set_current(Some(keeper)); - Ok(()) - } else { - Err(Error::VaultUnlockFail) - } - } else { - Err(Error::VaultNotAvailable(vault)) - } + + let password = read_password(Some("Passphrase: "))?; + maybe_conflict(cache, |writer| { + run_blocking(writer.open_vault(&summary, &password)) + }) } ShellCommand::Info => { let reader = cache.read().unwrap(); @@ -369,27 +465,42 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { println!("{}", name); (false, keeper.summary().clone(), name.to_string()) }; + + drop(writer); if renamed { - let response = - run_blocking(writer.set_vault_name(&summary, &name))?; - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::SetVaultName(response.status().into()))?; + maybe_conflict(cache, |writer| { + run_blocking(writer.set_vault_name(&summary, &name)) + }) + } else { + Ok(()) } - Ok(()) } ShellCommand::Status => { let reader = cache.read().unwrap(); - let keeper = - reader.current().ok_or(Error::NoVaultSelected)?; + let keeper = reader.current().ok_or(Error::NoVaultSelected)?; let (client_proof, server_proof) = - run_blocking(reader.head_wal(keeper.summary()))?; - println!("client = {}", CommitHash(client_proof.0)); - println!("server = {}", CommitHash(server_proof.0)); + run_blocking(reader.vault_status(keeper.summary()))?; + println!("local = {}", CommitHash(client_proof.0)); + println!("remote = {}", CommitHash(server_proof.0)); Ok(()) - }, + } + ShellCommand::Tree => { + let reader = cache.read().unwrap(); + let keeper = reader.current().ok_or(Error::NoVaultSelected)?; + let 
summary = keeper.summary(); + if let Some(tree) = reader.wal_tree(summary) { + if let Some(leaves) = tree.leaves() { + for leaf in &leaves { + println!("{}", hex::encode(leaf)); + } + println!("leaves = {}", leaves.len()); + } + if let Some(root) = tree.root() { + println!("root = {}", hex::encode(root)); + } + } + Ok(()) + } ShellCommand::Add { cmd } => { let mut writer = cache.write().unwrap(); let keeper = @@ -411,14 +522,12 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { None }; + drop(writer); + if let Some((summary, event)) = result { - let response = - run_blocking(writer.patch_vault(&summary, vec![event]))?; - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::AddSecret(response.status().into())) + maybe_conflict(cache, |writer| { + run_blocking(writer.patch_vault(&summary, vec![event])) + }) } else { Ok(()) } @@ -439,13 +548,7 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { let event = event.into_owned(); print::secret(&secret_meta, &secret_data); - let response = - run_blocking(writer.patch_vault(&summary, vec![event]))?; - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::ReadSecret(response.status().into())) + run_blocking(writer.patch_vault(&summary, vec![event])) } else { Err(Error::SecretNotAvailable(secret)) } @@ -467,7 +570,7 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { drop(reader); - let (_uuid, secret_meta, secret_data) = + let (uuid, secret_meta, secret_data) = result.ok_or(Error::SecretNotAvailable(secret.clone()))?; let result = @@ -494,26 +597,24 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { writer.current_mut().ok_or(Error::NoVaultSelected)?; let summary = keeper.summary().clone(); - let vault_id = *keeper.id(); + let event = keeper - .update(&vault_id, secret_meta, edited_secret)? + .update(&uuid, secret_meta, edited_secret)? .ok_or(Error::SecretNotAvailable(secret))?; let event = event.into_owned(); - let response = - run_blocking(writer.patch_vault(&summary, vec![event]))?; - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::SetSecret(response.status().into())) + drop(writer); + + maybe_conflict(cache, |writer| { + run_blocking(writer.patch_vault(&summary, vec![event])) + }) + // If the edited result was borrowed // it indicates that no changes were made } else { Ok(()) } } - ShellCommand::Del { secret } => { let (uuid, secret_meta) = find_secret_meta(Arc::clone(&cache), &secret)? @@ -531,14 +632,12 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { // attempting to borrow mutably twice let event = event.into_owned(); - let response = run_blocking( - writer.patch_vault(&summary, vec![event]), - )?; - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::DelSecret(response.status().into())) + drop(writer); + maybe_conflict(cache, |writer| { + run_blocking( + writer.patch_vault(&summary, vec![event]), + ) + }) } else { Err(Error::SecretNotAvailable(secret)) } @@ -546,7 +645,6 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { Ok(()) } } - ShellCommand::Mv { secret, label } => { let (uuid, _) = find_secret_meta(Arc::clone(&cache), &secret)? 
.ok_or(Error::SecretNotAvailable(secret.clone()))?; @@ -585,28 +683,108 @@ fn exec_program(program: Shell, cache: Arc>) -> Result<()> { .ok_or(Error::SecretNotAvailable(secret.clone()))?; let event = event.into_owned(); - let response = - run_blocking(writer.patch_vault(&summary, vec![event]))?; - - response - .status() - .is_success() - .then_some(()) - .ok_or(Error::MvSecret(response.status().into())) + + drop(writer); + maybe_conflict(cache, |writer| { + run_blocking(writer.patch_vault(&summary, vec![event])) + }) } + ShellCommand::Snapshot { cmd } => match cmd { + SnapShot::Take => { + let reader = cache.read().unwrap(); + let keeper = + reader.current().ok_or(Error::NoVaultSelected)?; + let (snapshot, _) = reader.take_snapshot(keeper.summary())?; + println!("Path: {}", snapshot.0.display()); + println!("Time: {}", snapshot.1); + println!("Hash: {}", snapshot.2); + println!("Size: {}", human_bytes(snapshot.3 as f64)); + Ok(()) + } + SnapShot::List { long } => { + let reader = cache.read().unwrap(); + let keeper = + reader.current().ok_or(Error::NoVaultSelected)?; + let snapshots = reader.snapshots().list(keeper.id())?; + if !snapshots.is_empty() { + for snapshot in snapshots.into_iter() { + if long { + print!( + "{} {} ", + snapshot.0.display(), + human_bytes(snapshot.3 as f64) + ); + } + println!("{} {}", snapshot.1, snapshot.2); + } + } else { + println!("No snapshots yet!"); + } + Ok(()) + } + }, + ShellCommand::History { cmd } => { + match cmd { + History::Compact { snapshot } => { + let reader = cache.read().unwrap(); + let keeper = + reader.current().ok_or(Error::NoVaultSelected)?; + let summary = keeper.summary().clone(); + if snapshot { + reader.take_snapshot(&summary)?; + } + drop(reader); + + let prompt = Some("Compaction will remove history, are you sure (y/n)? "); + if read_flag(prompt)? { + let mut writer = cache.write().unwrap(); + let (old_size, new_size) = + run_blocking(writer.compact(&summary))?; + println!("Old: {}", human_bytes(old_size as f64)); + println!("New: {}", human_bytes(new_size as f64)); + } + Ok(()) + } + History::Check => { + let reader = cache.read().unwrap(); + let keeper = + reader.current().ok_or(Error::NoVaultSelected)?; + let wal_path = reader.wal_path(keeper.summary()); + // Verify checksums in the commit tree + wal_commit_tree(&wal_path, true, |_| {})?; + println!("Verified ✓"); + + Ok(()) + } + History::List { long } => { + let reader = cache.read().unwrap(); + let keeper = + reader.current().ok_or(Error::NoVaultSelected)?; + + let records = reader.history(keeper.summary())?; + for (record, event) in records { + let commit = CommitHash(record.commit()); + print!("{} {} ", event.event_kind(), record.time()); + if long { + println!("{}", commit); + } else { + print!("\n"); + } + } + Ok(()) + } + } + } ShellCommand::Whoami => { let reader = cache.read().unwrap(); - let address = reader.client().address()?; + let address = reader.address()?; println!("{}", address); Ok(()) } ShellCommand::Close => { let mut writer = cache.write().unwrap(); - if let Some(current) = writer.current_mut() { - current.lock(); - } - writer.set_current(None); + writer.close_vault(); Ok(()) } ShellCommand::Quit => { diff --git a/workspace/client/src/signup.rs b/workspace/client/src/signup.rs index 941905f52e..e3ce20d8a7 100644 --- a/workspace/client/src/signup.rs +++ b/workspace/client/src/signup.rs @@ -1,14 +1,15 @@ //! Signup a new account. 
-use crate::{display_passphrase, run_blocking, Client, Error, Result}; +use crate::{ + display_passphrase, run_blocking, Cache, Client, ClientCache, Error, + Result, +}; use sos_core::{ - address::AddressStr, - crypto::generate_random_ecdsa_signing_key, - diceware::generate, - signer::SingleParty, - vault::{encode, Vault}, + address::AddressStr, crypto::generate_random_ecdsa_signing_key, + generate_passphrase, signer::SingleParty, }; use sos_readline::read_flag; -use std::{path::PathBuf, sync::Arc}; +use std::{borrow::Cow, path::PathBuf, sync::Arc}; +use terminal_banner::{Banner, Padding}; use url::Url; use web3_keystore::encrypt; @@ -25,27 +26,37 @@ pub fn signup( let address: AddressStr = (&public_key).try_into()?; let keystore_file = destination.join(&format!("{}.json", address)); - if keystore_file.exists() { return Err(Error::FileExists(keystore_file)); } - let (keystore_passphrase, _) = generate()?; - let (encryption_passphrase, _) = generate()?; + let (keystore_passphrase, _) = generate_passphrase()?; + + let message = format!( + r#"* Write keystore file to {} +* Send the encrypted vault to {} +* Keystore passphrase will be displayed +* Encryption passphrase will be displayed"#, + keystore_file.display(), + server + ); + + let banner = Banner::new() + .padding(Padding::one()) + .text(Cow::Borrowed( + "Creating a new account will perform the following actions:", + )) + .text(Cow::Owned(message)) + .render(); - println!(); - println!("Creating a new account will perform the following actions:"); - println!(); - println!("* Write keystore file to {}", keystore_file.display()); - println!("* Send the encrypted vault to {}", server); - println!("* Keystore passphrase will be displayed"); - println!("* Encryption passphrase will be displayed"); - println!(); + println!("{}", banner); let prompt = Some("Are you sure (y/n)? "); if read_flag(prompt)? 
{ let signer: SingleParty = (&signing_key).try_into()?; let client = Client::new(server, Arc::new(signer)); + let cache_dir = Cache::cache_dir()?; + let mut cache = Cache::new(client, cache_dir, true)?; let keystore = encrypt( &mut rand::thread_rng(), @@ -54,22 +65,10 @@ pub fn signup( Some(address.to_string()), )?; - let mut vault: Vault = Default::default(); - if let Some(name) = name { - vault.set_name(name); - } - vault.initialize(&encryption_passphrase)?; - - let buffer = encode(&vault)?; - let response = run_blocking(client.create_account(buffer))?; - if !response.status().is_success() { - return Err(Error::AccountCreate(response.status().into())); - } - + let encryption_passphrase = run_blocking(cache.create_account(name))?; std::fs::write(keystore_file, serde_json::to_string(&keystore)?)?; display_passphrase("KEYSTORE PASSPHRASE", &keystore_passphrase); - display_passphrase("ENCRYPTION PASSPHRASE", &encryption_passphrase); } diff --git a/workspace/core/Cargo.toml b/workspace/core/Cargo.toml index b14197ce6b..70f5ad4269 100644 --- a/workspace/core/Cargo.toml +++ b/workspace/core/Cargo.toml @@ -7,6 +7,8 @@ edition = "2021" async-trait = "0.1" aes-gcm = { version = "0.9", features = ["std"] } chacha20poly1305 = { version = "0.9", features = ["std"] } +tracing = "0.1" +filetime = "0.2" rand = "0.8" thiserror = "1" web3-signature = { version = "0.2.3", features = ["single-party"] } @@ -24,9 +26,8 @@ sha3 = "0.9" bip39 = { version = "1", features = ["std", "rand"] } uuid = { version = "1", features = ["serde", "v4"] } url = { version = "2.2", features = ["serde"] } -time = { version = "0.3", features = ["serde"] } +time = { version = "0.3", features = ["serde", "local-offset", "formatting"] } bitflags = "1" -rs_merkle = "1.2" ouroboros = "0.15" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] @@ -36,6 +37,10 @@ file-guard = "0.1" getrandom = {version = "0.2", features = ["js"]} uuid = { version = "1", features = ["serde", "v4", "js"] } +[dependencies.rs_merkle] +version = "1.2" +#path = "../../../../forks/rs-merkle" + [dependencies.serde-binary] version = "0.3.6" #path = "../../../serde-binary" diff --git a/workspace/core/src/commit_tree/mod.rs b/workspace/core/src/commit_tree/mod.rs index 674d274754..159181f125 100644 --- a/workspace/core/src/commit_tree/mod.rs +++ b/workspace/core/src/commit_tree/mod.rs @@ -1,5 +1,8 @@ //! Type for iterating and managing the commit trees for a vault. -use serde_binary::binary_rw::{BinaryReader, Endian, ReadStream, SeekStream}; +use serde_binary::{ + binary_rw::{BinaryReader, Endian, ReadStream, SeekStream}, + Decode, Deserializer, Encode, Result as BinaryResult, Serializer, +}; use std::ops::Range; use rs_merkle::{algorithms::Sha256, Hasher, MerkleProof, MerkleTree}; @@ -9,19 +12,10 @@ use crate::{ Error, Result, }; -pub mod integrity; +mod integrity; -/// Decode a merkle proof. -pub fn decode_proof>( - buffer: B, -) -> Result> { - Ok(MerkleProof::::from_bytes(buffer.as_ref())?) -} - -/// Encode a merkle proof. -pub fn encode_proof(proof: &MerkleProof) -> Vec { - proof.to_bytes() -} +pub use integrity::vault_commit_tree; +pub use integrity::wal_commit_tree; /// Compute the Sha256 hash of some data. pub fn hash(data: &[u8]) -> [u8; 32] { @@ -29,7 +23,72 @@ pub fn hash(data: &[u8]) -> [u8; 32] { } /// Represents a root hash and a proof of certain nodes. 
-pub struct CommitProof(pub ::Hash, pub MerkleProof); +//#[derive(Debug, Eq, PartialEq)] +pub struct CommitProof( + pub ::Hash, + pub MerkleProof, + pub usize, + pub Range, +); + +impl CommitProof { + /// The root hash for the proof. + pub fn root(&self) -> &::Hash { + &self.0 + } + + /// The root hash for the proof as hexadecimal. + pub fn root_hex(&self) -> String { + hex::encode(&self.0) + } + + /// Reduce this commit proof to it's root hash and leaves length. + pub fn reduce(self) -> ([u8; 32], usize) { + (self.0, self.2) + } +} + +impl Default for CommitProof { + fn default() -> Self { + Self([0; 32], MerkleProof::::new(vec![]), 0, 0..0) + } +} + +impl Encode for CommitProof { + fn encode(&self, ser: &mut Serializer) -> BinaryResult<()> { + ser.writer.write_bytes(&self.0)?; + let proof_bytes = self.1.to_bytes(); + ser.writer.write_u32(proof_bytes.len() as u32)?; + ser.writer.write_bytes(&proof_bytes)?; + + ser.writer.write_u32(self.2 as u32)?; + ser.writer.write_u32(self.3.start as u32)?; + ser.writer.write_u32(self.3.end as u32)?; + Ok(()) + } +} + +impl Decode for CommitProof { + fn decode(&mut self, de: &mut Deserializer) -> BinaryResult<()> { + let root_hash: [u8; 32] = + de.reader.read_bytes(32)?.as_slice().try_into()?; + self.0 = root_hash; + let length = de.reader.read_u32()?; + let proof_bytes = de.reader.read_bytes(length as usize)?; + let proof = MerkleProof::::from_bytes(&proof_bytes) + .map_err(Box::from)?; + + self.1 = proof; + self.2 = de.reader.read_u32()? as usize; + let start = de.reader.read_u32()?; + let end = de.reader.read_u32()?; + + // TODO: validate range start is <= range end + + self.3 = start as usize..end as usize; + Ok(()) + } +} /// The result of comparing two commit trees. /// @@ -41,7 +100,7 @@ pub enum Comparison { /// Trees are equal as their root commits match. Equal, /// Tree contains the other proof. - Contains(usize, [u8; 32]), + Contains(Vec, Vec<[u8; 32]>), /// Unable to find a match against the proof. Unknown, } @@ -120,43 +179,70 @@ impl CommitTree { /// Get the root hash and a proof of the last leaf node. pub fn head(&self) -> Result { + let range = self.tree.leaves_len() - 1..self.tree.leaves_len(); + self.proof_range(range) + } + + /// Get a proof for the given range. + pub fn proof_range(&self, indices: Range) -> Result { + let leaf_indices = indices.clone().map(|i| i).collect::>(); + self.proof(&leaf_indices) + } + + /// Get a proof for the given indices. + pub fn proof(&self, leaf_indices: &[usize]) -> Result { let root = self.root().ok_or(Error::NoRootCommit)?; - let leaf_indices = vec![self.tree.leaves_len() - 1]; let proof = self.tree.proof(&leaf_indices); - Ok(CommitProof(root, proof)) + // Map the usize array to a Range, implies all the elements + // are continuous, sparse indices are not supported + // + // Internally we use a range to represent the indices as these + // proofs are sent over the network. + let indices = if leaf_indices.len() == 0 { + 0..0 + } else { + if leaf_indices.len() > 1 { + leaf_indices[0]..leaf_indices[leaf_indices.len() - 1] + 1 + } else { + leaf_indices[0]..leaf_indices[0] + 1 + } + }; + Ok(CommitProof(root, proof, self.len(), indices)) } /// Compare this tree against another root hash and merkle proof. 
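The new `CommitProof` carries the root hash, the serialized proof, the total leaf count and the contiguous leaf range being proven, which is exactly what `MerkleProof::verify` needs on the receiving side. A standalone sketch of that round trip with `rs_merkle`, outside the crate's `CommitTree` wrapper (the string leaves are placeholders for real record hashes):

```rust
use rs_merkle::{algorithms::Sha256, Hasher, MerkleTree};

fn main() {
    // Hash a few mock records to get the tree leaves.
    let leaves: Vec<[u8; 32]> = ["a", "b", "c", "d"]
        .iter()
        .map(|s| Sha256::hash(s.as_bytes()))
        .collect();

    let tree = MerkleTree::<Sha256>::from_leaves(&leaves);
    let root = tree.root().expect("tree has leaves");

    // Prove the last leaf, mirroring what head() does with
    // leaves_len() - 1..leaves_len().
    let indices: Vec<usize> = vec![leaves.len() - 1];
    let proof = tree.proof(&indices);

    // The proof travels as (root, proof bytes, leaf count, range);
    // the receiver reconstructs it and verifies against its own leaves.
    let bytes = proof.to_bytes();
    let received =
        rs_merkle::MerkleProof::<Sha256>::from_bytes(&bytes).unwrap();
    let leaves_to_prove = vec![leaves[leaves.len() - 1]];
    assert!(received.verify(root, &indices, &leaves_to_prove, leaves.len()));
}
```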
pub fn compare(&self, proof: CommitProof) -> Result { - let CommitProof(other_root, proof) = proof; + let CommitProof(other_root, proof, count, range) = proof; let root = self.root().ok_or(Error::NoRootCommit)?; if root == other_root { Ok(Comparison::Equal) } else { - let leaves = self.tree.leaves().unwrap_or_default(); - let it = leaves.into_iter().enumerate().rev(); - for (index, leaf) in it { - let indices = vec![index]; - let leaves = vec![leaf]; - let matched = proof.verify( + if range.start < self.len() && range.end < self.len() { + let leaves = self.tree.leaves().unwrap_or_default(); + let indices_to_prove = + range.clone().map(|i| i).collect::>(); + let leaves_to_prove = range + .map(|i| *leaves.get(i).unwrap()) + .collect::>(); + if proof.verify( other_root, - &indices, - leaves.as_slice(), - leaves.len(), - ); - if matched { - return Ok(Comparison::Contains(index, leaf)); + indices_to_prove.as_slice(), + leaves_to_prove.as_slice(), + count, + ) { + Ok(Comparison::Contains( + indices_to_prove, + leaves_to_prove, + )) + } else { + Ok(Comparison::Unknown) } + } else { + Ok(Comparison::Unknown) } - Ok(Comparison::Unknown) } } - /// Get a commit proof for the given leaf indices. - pub fn proof(&self, leaf_indices: &[usize]) -> MerkleProof { - self.tree.proof(leaf_indices) - } - /// Get the root hash of the underlying merkle tree. pub fn root(&self) -> Option<::Hash> { self.tree.root() diff --git a/workspace/core/src/constants.rs b/workspace/core/src/constants.rs new file mode 100644 index 0000000000..93c9cc904c --- /dev/null +++ b/workspace/core/src/constants.rs @@ -0,0 +1,50 @@ +//! Constants shared between the client and server. + +/// Constants for file identity. +mod identity { + /// Audit log identity magic bytes (SOSA). + pub const AUDIT_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x41]; + + /// Write-ahead log identity magic bytes (SOSW). + pub const WAL_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x57]; + + /// Patch file identity magic bytes (SOSP). + pub const PATCH_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x50]; + + /// Vault file identity magic bytes (SOSV). + pub const VAULT_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x56]; +} + +/// Constants for file extensions. +mod extensions { + /// File extension used for WAL files. + pub const WAL_EXT: &str = "wal"; + + /// File extension used when deleting WAL files. + pub const WAL_DELETED_EXT: &str = "wal.deleted"; + + /// File extension used for vault files. + pub const VAULT_EXT: &str = "vault"; + + /// File extension used when creating a vault file backup. + pub const VAULT_BACKUP_EXT: &str = "vault.backup"; + + /// File extension used for patch files. + pub const PATCH_EXT: &str = "patch"; +} + +/// Constants for header names. +mod headers { + /// Constant for the signed message header. + pub const X_SIGNED_MESSAGE: &str = "x-signed-message"; + + /// Constant for the commit proof header. + pub const X_COMMIT_PROOF: &str = "x-commit-proof"; + + /// Constant for the match proof header. + pub const X_MATCH_PROOF: &str = "x-match-proof"; +} + +pub use extensions::*; +pub use headers::*; +pub use identity::*; diff --git a/workspace/core/src/crypto/mod.rs b/workspace/core/src/crypto/mod.rs index 9a57c000f9..166c799bf3 100644 --- a/workspace/core/src/crypto/mod.rs +++ b/workspace/core/src/crypto/mod.rs @@ -135,12 +135,6 @@ pub mod algorithms { } } -/// Type identifiers for ECDSA keys. -pub mod types { - /// Represents the k256 (secp256k1) single party key. - pub const K256: u8 = 0x01; -} - /// Enumeration of the sizes for nonces. 
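Every file format now declares its four-byte magic identity (`SOSA`, `SOSW`, `SOSP`, `SOSV`) in `constants.rs`, so corrupt or misnamed files can be rejected before any parsing. A minimal check of the same idea outside the crate's `FileIdentity` helper might look like this; the `vault.wal` path is only a placeholder.

```rust
use std::{fs::File, io::Read, path::Path};

/// Write-ahead log identity magic bytes (SOSW), as defined in constants.rs.
const WAL_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x57];

/// Return true if the file begins with the expected magic bytes.
fn has_identity(path: &Path, identity: &[u8; 4]) -> std::io::Result<bool> {
    let mut file = File::open(path)?;
    let mut magic = [0u8; 4];
    // read_exact fails if the file is shorter than four bytes.
    match file.read_exact(&mut magic) {
        Ok(()) => Ok(&magic == identity),
        Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => Ok(false),
        Err(e) => Err(e),
    }
}

fn main() -> std::io::Result<()> {
    let ok = has_identity(Path::new("vault.wal"), &WAL_IDENTITY)?;
    println!("valid WAL header: {}", ok);
    Ok(())
}
```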
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub enum Nonce { diff --git a/workspace/core/src/diceware.rs b/workspace/core/src/diceware.rs index 80e4e46980..30bcb41659 100644 --- a/workspace/core/src/diceware.rs +++ b/workspace/core/src/diceware.rs @@ -7,8 +7,8 @@ use chbs::{ word::{WordList, WordSampler}, }; -/// Generate a passphrase and the entropy in bits. -pub fn generate_passphrase( +/// Generate a passphrase using the given config. +fn generate_passphrase_config( config: Option>, ) -> Result<(String, f64)> { let config = if let Some(config) = config { @@ -26,16 +26,16 @@ pub fn generate_passphrase( Ok((scheme.generate(), scheme.entropy().bits())) } -/// Generate a passphrase with the given number of words. +/// Generate a diceware passphrase with the given number of words. /// /// The number of words must be at least six. pub fn generate_passphrase_words(words: u8) -> Result<(String, f64)> { let config = default_config(words as usize); - generate_passphrase(Some(config)) + generate_passphrase_config(Some(config)) } -/// Generate a passphrase with six words. -pub fn generate() -> Result<(String, f64)> { +/// Generate a diceware passphrase with six words which is ~171 bits of entropy. +pub fn generate_passphrase() -> Result<(String, f64)> { generate_passphrase_words(6) } diff --git a/workspace/core/src/error.rs b/workspace/core/src/error.rs index 2deec41af2..75e660d54e 100644 --- a/workspace/core/src/error.rs +++ b/workspace/core/src/error.rs @@ -6,6 +6,10 @@ use uuid::Uuid; /// Error thrown by the core library. #[derive(Debug, Error)] pub enum Error { + /// Error generated when a directory is expected. + #[error("path {0} is not a directory")] + NotDirectory(PathBuf), + /// Error generated attempting acquire a lock on a file that is already locked. #[error("file {0} is already locked")] FileLocked(PathBuf), @@ -186,4 +190,12 @@ pub enum Error { /// Error generated by the merkle tree library. #[error(transparent)] Merkle(#[from] rs_merkle::Error), + + /// Error generated converting time types. + #[error(transparent)] + Time(#[from] time::error::ComponentRange), + + /// Error generated formatting time. + #[error(transparent)] + TimeFormat(#[from] time::error::Format), } diff --git a/workspace/core/src/events/audit.rs b/workspace/core/src/events/audit.rs index c1b085ad1b..d71d25b28b 100644 --- a/workspace/core/src/events/audit.rs +++ b/workspace/core/src/events/audit.rs @@ -30,10 +30,10 @@ pub trait AuditProvider { /// Error type for this implementation. type Error; - /// Append an audit log record to a destination. - async fn append_audit_event( + /// Append audit log records to a destination. 
+ async fn append_audit_events( &mut self, - logs: AuditEvent, + events: &[AuditEvent], ) -> std::result::Result<(), Self::Error>; } @@ -123,7 +123,6 @@ impl AuditEvent { SyncEvent::CreateVault(_) | SyncEvent::ReadVault | SyncEvent::DeleteVault - | SyncEvent::UpdateVault(_) | SyncEvent::GetVaultName | SyncEvent::SetVaultName(_) | SyncEvent::SetVaultMeta(_) => AuditData::Vault(vault_id), diff --git a/workspace/core/src/events/change.rs b/workspace/core/src/events/change.rs index 13fb5bffa2..9427640705 100644 --- a/workspace/core/src/events/change.rs +++ b/workspace/core/src/events/change.rs @@ -125,10 +125,6 @@ impl ChangeEvent { address: *address, vault_id: *vault_id, }), - SyncEvent::UpdateVault(_) => Some(ChangeEvent::UpdateVault { - address: *address, - vault_id: *vault_id, - }), SyncEvent::DeleteVault => Some(ChangeEvent::DeleteVault { address: *address, vault_id: *vault_id, diff --git a/workspace/core/src/events/mod.rs b/workspace/core/src/events/mod.rs index c022ab13d2..1936ac61f7 100644 --- a/workspace/core/src/events/mod.rs +++ b/workspace/core/src/events/mod.rs @@ -11,7 +11,7 @@ mod wal; pub use audit::{AuditData, AuditEvent, AuditProvider}; pub use change::ChangeEvent; -pub use patch::Patch; +pub use patch::{Patch, PatchFile}; pub use sync::SyncEvent; pub use types::EventKind; pub use wal::WalEvent; diff --git a/workspace/core/src/events/patch.rs b/workspace/core/src/events/patch.rs index 02d37862e6..65bc8b24af 100644 --- a/workspace/core/src/events/patch.rs +++ b/workspace/core/src/events/patch.rs @@ -1,12 +1,20 @@ //! Patch represents a changeset of events to apply to a vault. use serde_binary::{ + binary_rw::{BinaryWriter, Endian, FileStream, OpenType, SeekStream}, Decode, Deserializer, Encode, Error as BinaryError, Result as BinaryResult, Serializer, }; +use std::{ + fs::{File, OpenOptions}, + io::{Seek, SeekFrom, Write}, + path::{Path, PathBuf}, +}; use crate::{ + constants::{PATCH_EXT, PATCH_IDENTITY}, + decode, encode, events::SyncEvent, - file_identity::{FileIdentity, PATCH_IDENTITY}, + FileIdentity, Result, }; /// Patch wraps a changeset of events to be sent across the network. @@ -30,10 +38,161 @@ impl Decode for Patch<'_> { .map_err(|e| BinaryError::Boxed(Box::from(e)))?; let length = de.reader.read_u32()?; for _ in 0..length { - let mut event: SyncEvent = Default::default(); + let mut event: SyncEvent<'static> = Default::default(); event.decode(&mut *de)?; self.0.push(event); } Ok(()) } } + +/// Caches a collection of events on disc which can be used +/// by clients to store changes that have not yet been applied +/// to a remote server. +pub struct PatchFile { + file: File, + file_path: PathBuf, +} + +impl PatchFile { + /// Create a new patch file. + pub fn new>(path: P) -> Result { + let file_path = path.as_ref().to_path_buf(); + + if !file_path.exists() { + File::create(path.as_ref())?; + } + + let mut file = OpenOptions::new() + .write(true) + .append(true) + .open(path.as_ref())?; + + let size = file.metadata()?.len(); + if size == 0 { + let patch: Patch = Default::default(); + let buffer = encode(&patch)?; + file.write_all(&buffer)?; + } + + Ok(Self { file, file_path }) + } + + /// The file extension for patch files. + pub fn extension() -> &'static str { + PATCH_EXT + } + + /// Append some events to this patch file. + /// + /// Returns a collection of events; if this patch file was empty + /// beforehand the collection equals the passed events otherwise + /// it will be any existing events loaded from disc with the given + /// events appended. 
+ pub fn append<'a>( + &mut self, + mut events: Vec>, + ) -> Result> { + let len = self.file_path.metadata()?.len() as usize; + + // Load any existing events in to memory + let mut all_events = if len > PATCH_IDENTITY.len() { + let patch = self.read()?; + patch.0 + } else { + vec![] + }; + + // Append the incoming events to the file + let mut events_buffer = Vec::new(); + for event in events.iter() { + let event_buffer = encode(event)?; + events_buffer.extend_from_slice(&event_buffer); + } + self.file.write_all(&events_buffer)?; + + // Append the given events on to any existing events + // so we can return a new patch to the caller + all_events.append(&mut events); + + // Write out the new length + self.write_events_len(all_events.len() as u32)?; + + Ok(Patch(all_events)) + } + + fn write_events_len(&self, length: u32) -> Result<()> { + let mut stream = + FileStream::new(&self.file_path, OpenType::ReadWrite)?; + let mut writer = BinaryWriter::new(&mut stream, Endian::Big); + writer.seek(PATCH_IDENTITY.len())?; + writer.write_u32(length)?; + Ok(()) + } + + /// Read a patch from the file on disc. + pub fn read(&self) -> Result> { + let buffer = std::fs::read(&self.file_path)?; + let patch: Patch = decode(&buffer)?; + Ok(patch) + } + + /// Truncate the file to an empty patch list. + /// + /// This should be called when a client has successfully + /// applied a patch to the remote and local WAL files to + /// remove any pending events. + pub fn truncate(&mut self) -> Result<()> { + self.file.set_len(0)?; + self.file.seek(SeekFrom::Start(0))?; + + let patch: Patch = Default::default(); + let buffer = encode(&patch)?; + self.file.write_all(&buffer)?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::*; + use anyhow::Result; + use tempfile::NamedTempFile; + + #[test] + fn patch_file() -> Result<()> { + let temp = NamedTempFile::new()?; + let mut patch_file = PatchFile::new(temp.path())?; + + let mut vault = mock_vault(); + let (encryption_key, _) = mock_encryption_key()?; + let (_, _, _, _, mock_event) = + mock_vault_note(&mut vault, &encryption_key, "foo", "bar")?; + + // Empty patch file is 8 bytes + assert_eq!(8, temp.path().metadata()?.len()); + + let events = vec![mock_event.clone()]; + + let patch = patch_file.append(events)?; + let new_len = temp.path().metadata()?.len(); + assert!(new_len > 8); + assert_eq!(1, patch.0.len()); + + let more_events = vec![mock_event.clone()]; + let next_patch = patch_file.append(more_events)?; + let more_len = temp.path().metadata()?.len(); + assert!(more_len > new_len); + assert_eq!(2, next_patch.0.len()); + + let disc_patch = patch_file.read()?; + assert_eq!(2, disc_patch.0.len()); + + // Truncate the file + patch_file.truncate()?; + assert_eq!(8, temp.path().metadata()?.len()); + + Ok(()) + } +} diff --git a/workspace/core/src/events/sync.rs b/workspace/core/src/events/sync.rs index 07c2c8ed14..4e4b5c612a 100644 --- a/workspace/core/src/events/sync.rs +++ b/workspace/core/src/events/sync.rs @@ -44,9 +44,6 @@ pub enum SyncEvent<'a> { /// SyncEvent used to indicate that a vault was read. ReadVault, - /// SyncEvent used to indicate that a vault was updated. - UpdateVault(Cow<'a, [u8]>), - /// SyncEvent used to indicate a vault was deleted. 
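`PatchFile` keeps not-yet-applied events on disc: `append` writes the encoded events and updates the length prefix, `read` decodes everything pending, and `truncate` resets the file to an empty patch once the server has accepted the changes. A much-simplified sketch of that append/read/truncate lifecycle, using a toy newline-separated format rather than the crate's binary encoding (`pending.toy` is just a placeholder path):

```rust
use std::{
    fs::{File, OpenOptions},
    io::{Read, Seek, SeekFrom, Write},
    path::{Path, PathBuf},
};

/// Toy pending-changes file: a 4-byte magic header, then one entry per line.
struct PendingFile {
    path: PathBuf,
}

const MAGIC: &[u8; 4] = b"TOYP";

impl PendingFile {
    fn new(path: &Path) -> std::io::Result<Self> {
        if !path.exists() || path.metadata()?.len() == 0 {
            File::create(path)?.write_all(MAGIC)?;
        }
        Ok(Self { path: path.to_path_buf() })
    }

    /// Append entries and return everything pending so far.
    fn append(&mut self, entries: &[&str]) -> std::io::Result<Vec<String>> {
        let mut file = OpenOptions::new().append(true).open(&self.path)?;
        for entry in entries {
            writeln!(file, "{}", entry)?;
        }
        self.read()
    }

    /// Read all pending entries back from disc.
    fn read(&self) -> std::io::Result<Vec<String>> {
        let mut buf = String::new();
        let mut file = File::open(&self.path)?;
        file.seek(SeekFrom::Start(MAGIC.len() as u64))?;
        file.read_to_string(&mut buf)?;
        Ok(buf.lines().map(|l| l.to_string()).collect())
    }

    /// Reset to an empty patch after the server has applied the events.
    fn truncate(&mut self) -> std::io::Result<()> {
        let mut file = OpenOptions::new().write(true).open(&self.path)?;
        file.set_len(0)?;
        file.seek(SeekFrom::Start(0))?;
        file.write_all(MAGIC)
    }
}

fn main() -> std::io::Result<()> {
    let mut pending = PendingFile::new(Path::new("pending.toy"))?;
    assert_eq!(1, pending.append(&["create secret"])?.len());
    assert_eq!(2, pending.append(&["rename vault"])?.len());
    pending.truncate()?;
    assert!(pending.read()?.is_empty());
    Ok(())
}
```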
DeleteVault, @@ -107,7 +104,6 @@ impl SyncEvent<'_> { SyncEvent::Noop => EventKind::Noop, SyncEvent::CreateVault(_) => EventKind::CreateVault, SyncEvent::ReadVault => EventKind::ReadVault, - SyncEvent::UpdateVault(_) => EventKind::UpdateVault, SyncEvent::DeleteVault => EventKind::DeleteVault, SyncEvent::GetVaultName => EventKind::GetVaultName, SyncEvent::SetVaultName(_) => EventKind::SetVaultName, @@ -131,9 +127,6 @@ impl SyncEvent<'_> { SyncEvent::CreateVault(Cow::Owned(value.into_owned())) } SyncEvent::ReadVault => SyncEvent::ReadVault, - SyncEvent::UpdateVault(value) => { - SyncEvent::UpdateVault(Cow::Owned(value.into_owned())) - } SyncEvent::DeleteVault => SyncEvent::DeleteVault, SyncEvent::GetVaultName => SyncEvent::GetVaultName, SyncEvent::SetVaultName(value) => { @@ -165,10 +158,6 @@ impl<'a> Encode for SyncEvent<'a> { ser.writer.write_u32(vault.as_ref().len() as u32)?; ser.writer.write_bytes(vault.as_ref())?; } - SyncEvent::UpdateVault(vault) => { - ser.writer.write_u32(vault.as_ref().len() as u32)?; - ser.writer.write_bytes(vault.as_ref())?; - } SyncEvent::ReadVault | SyncEvent::DeleteVault | SyncEvent::GetVaultName => {} @@ -214,11 +203,6 @@ impl<'a> Decode for SyncEvent<'a> { EventKind::ReadVault => { *self = SyncEvent::ReadVault; } - EventKind::UpdateVault => { - let length = de.reader.read_u32()?; - let buffer = de.reader.read_bytes(length as usize)?; - *self = SyncEvent::UpdateVault(Cow::Owned(buffer)); - } EventKind::DeleteVault => { *self = SyncEvent::DeleteVault; } diff --git a/workspace/core/src/events/wal.rs b/workspace/core/src/events/wal.rs index 35a800636e..6896d6a6ae 100644 --- a/workspace/core/src/events/wal.rs +++ b/workspace/core/src/events/wal.rs @@ -24,9 +24,6 @@ pub enum WalEvent<'a> { /// Create a new vault. CreateVault(Cow<'a, [u8]>), - /// Update the vault buffer. - UpdateVault(Cow<'a, [u8]>), - /// Set the vault name. SetVaultName(Cow<'a, str>), @@ -45,11 +42,10 @@ pub enum WalEvent<'a> { impl WalEvent<'_> { /// Get the event kind for this event. 
- fn event_kind(&self) -> EventKind { + pub fn event_kind(&self) -> EventKind { match self { WalEvent::Noop => EventKind::Noop, WalEvent::CreateVault(_) => EventKind::CreateVault, - WalEvent::UpdateVault(_) => EventKind::UpdateVault, WalEvent::SetVaultName(_) => EventKind::SetVaultName, WalEvent::SetVaultMeta(_) => EventKind::SetVaultMeta, WalEvent::CreateSecret(_, _) => EventKind::CreateSecret, @@ -66,7 +62,7 @@ impl<'a> Encode for WalEvent<'a> { match self { WalEvent::Noop => panic!("WalEvent: attempt to encode a noop"), - WalEvent::CreateVault(vault) | WalEvent::UpdateVault(vault) => { + WalEvent::CreateVault(vault) => { ser.writer.write_u32(vault.as_ref().len() as u32)?; ser.writer.write_bytes(vault.as_ref())?; } @@ -106,12 +102,6 @@ impl<'a> Decode for WalEvent<'a> { let buffer = de.reader.read_bytes(length as usize)?; *self = WalEvent::CreateVault(Cow::Owned(buffer)); } - EventKind::UpdateVault => { - let length = de.reader.read_u32()?; - let buffer = de.reader.read_bytes(length as usize)?; - *self = WalEvent::UpdateVault(Cow::Owned(buffer)); - } - EventKind::SetVaultName => { let name = de.reader.read_string()?; *self = WalEvent::SetVaultName(Cow::Owned(name)); @@ -160,9 +150,6 @@ impl<'a> TryFrom> for WalEvent<'a> { SyncEvent::CreateVault(value) => { Ok(WalEvent::CreateVault(value.clone())) } - SyncEvent::UpdateVault(value) => { - Ok(WalEvent::UpdateVault(value.clone())) - } SyncEvent::SetVaultName(name) => { Ok(WalEvent::SetVaultName(name.clone())) } diff --git a/workspace/core/src/file_access.rs b/workspace/core/src/file_access.rs index 78c3840810..4227f55462 100644 --- a/workspace/core/src/file_access.rs +++ b/workspace/core/src/file_access.rs @@ -19,18 +19,18 @@ use serde_binary::{ use uuid::Uuid; use crate::{ + constants::VAULT_IDENTITY, + crypto::AeadPack, events::SyncEvent, - file_identity::{FileIdentity, VAULT_IDENTITY}, secret::SecretId, vault::{ - encode, CommitHash, Contents, Header, Summary, VaultAccess, - VaultCommit, VaultEntry, + encode, Contents, Header, Summary, VaultAccess, VaultCommit, + VaultEntry, }, - Result, + CommitHash, FileIdentity, Result, }; -/// Wrapper type for accessing a vault file that manages -/// an underlying file stream. +/// Implements access to an encrypted vault backed by a file on disc. pub struct VaultFileAccess { file_path: PathBuf, stream: Mutex, @@ -213,12 +213,32 @@ impl VaultAccess for VaultFileAccess { Ok(SyncEvent::SetVaultName(Cow::Owned(name))) } + fn set_vault_meta( + &mut self, + meta_data: Option, + ) -> Result> { + let content_offset = self.check_identity()?; + let mut header = Header::read_header_file(&self.file_path)?; + header.set_meta(meta_data.clone()); + self.write_header(content_offset, &header)?; + Ok(SyncEvent::SetVaultMeta(Cow::Owned(meta_data))) + } + fn create( &mut self, commit: CommitHash, secret: VaultEntry, ) -> Result> { let id = Uuid::new_v4(); + self.insert(id, commit, secret) + } + + fn insert( + &mut self, + id: SecretId, + commit: CommitHash, + secret: VaultEntry, + ) -> Result> { let content_offset = self.check_identity()?; let total_rows = self.rows(content_offset)?; diff --git a/workspace/core/src/file_identity.rs b/workspace/core/src/file_identity.rs index b4d5491bcf..4c5d8938fe 100644 --- a/workspace/core/src/file_identity.rs +++ b/workspace/core/src/file_identity.rs @@ -1,26 +1,10 @@ -//! Type that represents the magic identity bytes for file formats. -use serde_binary::{ - Decode, Deserializer, Encode, Error as BinaryError, - Result as BinaryResult, Serializer, -}; +//! 
Helper that reads and writes the magic identity bytes for file formats. +use serde_binary::{Deserializer, Serializer}; use crate::{Error, Result}; -/// Aduit log identity magic bytes (SOSA). -pub const AUDIT_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x41]; - -/// Write-ahead log identity magic bytes (SOSW). -pub const WAL_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x57]; - -/// Patch file identity magic bytes (SOSP). -pub const PATCH_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x50]; - -/// Vault file identity magic bytes (SOSV). -pub const VAULT_IDENTITY: [u8; 4] = [0x53, 0x4F, 0x53, 0x56]; - /// Read and write the identity bytes for a file. -#[derive(Debug, Eq, PartialEq)] -pub struct FileIdentity(pub [u8; 4]); +pub struct FileIdentity; impl FileIdentity { /// Read the identity magic bytes from a slice. @@ -51,19 +35,13 @@ impl FileIdentity { } Ok(()) } -} -impl Encode for FileIdentity { - fn encode(&self, ser: &mut Serializer) -> BinaryResult<()> { - ser.writer.write_bytes(&self.0)?; - Ok(()) - } -} - -impl Decode for FileIdentity { - fn decode(&mut self, de: &mut Deserializer) -> BinaryResult<()> { - FileIdentity::read_identity(de, &self.0) - .map_err(|e| BinaryError::Boxed(Box::from(e)))?; + /// Write the identity magic bytes. + pub fn write_identity( + ser: &mut Serializer, + identity: &[u8], + ) -> Result<()> { + ser.writer.write_bytes(identity)?; Ok(()) } } diff --git a/workspace/core/src/gatekeeper.rs b/workspace/core/src/gatekeeper.rs index 3ef6c6ebbe..b8c31d9f1c 100644 --- a/workspace/core/src/gatekeeper.rs +++ b/workspace/core/src/gatekeeper.rs @@ -1,19 +1,4 @@ //! Gatekeeper manages access to a vault. -//! -//! It stores the private key in memory so should only be used on client -//! implementations. -//! -//! Calling `lock()` will zeroize the private key in memory and prevent -//! any access to the vault until `unlock()` is called successfully. -//! -//! To allow for meta data to be displayed before secret decryption -//! certain parts of a vault are encrypted separately which means that -//! technically it would be possible to use different private keys for -//! different secrets and for the meta data however this would be -//! a very poor user experience and would lead to confusion so the -//! gatekeeper is also responsible for ensuring the same private key -//! is used to encrypt the different chunks. -//! use crate::{ crypto::{secret_key::SecretKey, AeadPack}, decode, encode, @@ -26,13 +11,29 @@ use std::collections::HashMap; use uuid::Uuid; use zeroize::Zeroize; -/// Manage access to a vault's secrets. +/// Access to an in-memory vault optionally mirroring changes to disc. +/// +/// It stores the private key in memory so should only be used on client +/// implementations. +/// +/// Calling `lock()` will zeroize the private key in memory and prevent +/// any access to the vault until `unlock()` is called successfully. +/// +/// To allow for meta data to be displayed before secret decryption +/// certain parts of a vault are encrypted separately which means that +/// technically it would be possible to use different private keys for +/// different secrets and for the meta data however this would be +/// a very poor user experience and would lead to confusion so the +/// gatekeeper is also responsible for ensuring the same private key +/// is used to encrypt the different chunks. #[derive(Default)] pub struct Gatekeeper { /// The private key. - private_key: Option>, + private_key: Option, /// The underlying vault. vault: Vault, + /// Mirror for in-memory vault changes. 
+ mirror: Option>, } impl Gatekeeper { @@ -41,6 +42,19 @@ impl Gatekeeper { Self { vault, private_key: None, + mirror: None, + } + } + + /// Create a new gatekeeper with a mirror. + pub fn new_mirror( + vault: Vault, + mirror: Box, + ) -> Self { + Self { + vault, + private_key: None, + mirror: Some(mirror), } } @@ -75,7 +89,10 @@ impl Gatekeeper { } /// Set the public name for the vault. - pub fn set_vault_name(&mut self, name: String) -> Result { + pub fn set_vault_name(&mut self, name: String) -> Result> { + if let Some(mirror) = self.mirror.as_mut() { + mirror.set_vault_name(name.clone())?; + } self.vault.set_vault_name(name) } @@ -88,7 +105,7 @@ impl Gatekeeper { ) -> Result<()> { // Initialize the private key and store the salt let private_key = self.vault.initialize(password.as_ref())?; - self.private_key = Some(Box::new(private_key)); + self.private_key = Some(private_key); // Assign the label to the meta data let mut init_meta_data: VaultMeta = Default::default(); @@ -151,12 +168,15 @@ impl Gatekeeper { } /// Set the meta data for the vault. - fn set_meta(&mut self, meta_data: VaultMeta) -> Result<()> { + // TODO: rename to set_vault_meta() for consistency + fn set_meta(&mut self, meta_data: VaultMeta) -> Result> { if let Some(private_key) = &self.private_key { let meta_blob = encode(&meta_data)?; let meta_aead = self.vault.encrypt(private_key, &meta_blob)?; - self.vault.header_mut().set_meta(Some(meta_aead)); - Ok(()) + if let Some(mirror) = self.mirror.as_mut() { + mirror.set_vault_meta(Some(meta_aead.clone()))?; + } + self.vault.set_vault_meta(Some(meta_aead)) } else { Err(Error::VaultLocked) } @@ -218,7 +238,7 @@ impl Gatekeeper { &mut self, secret_meta: SecretMeta, secret: Secret, - ) -> Result { + ) -> Result> { // TODO: use cached in-memory meta data let meta = self.meta_data()?; @@ -237,9 +257,22 @@ impl Gatekeeper { self.vault.encrypt(private_key, &secret_blob)?; let (commit, _) = Vault::commit_hash(&meta_aead, &secret_aead)?; - Ok(self - .vault - .create(commit, VaultEntry(meta_aead, secret_aead))?) + + let id = Uuid::new_v4(); + + if let Some(mirror) = self.mirror.as_mut() { + mirror.insert( + id, + commit.clone(), + VaultEntry(meta_aead.clone(), secret_aead.clone()), + )?; + } + + Ok(self.vault.insert( + id, + commit, + VaultEntry(meta_aead, secret_aead), + )?) } else { Err(Error::VaultLocked) } @@ -249,7 +282,7 @@ impl Gatekeeper { pub fn read( &self, id: &SecretId, - ) -> Result> { + ) -> Result)>> { let payload = SyncEvent::ReadSecret(*id); Ok(self .read_secret(id)? @@ -262,7 +295,7 @@ impl Gatekeeper { id: &SecretId, secret_meta: SecretMeta, secret: Secret, - ) -> Result> { + ) -> Result>> { // TODO: use cached in-memory meta data let meta = self.meta_data()?; @@ -292,6 +325,15 @@ impl Gatekeeper { self.vault.encrypt(private_key, &secret_blob)?; let (commit, _) = Vault::commit_hash(&meta_aead, &secret_aead)?; + + if let Some(mirror) = self.mirror.as_mut() { + mirror.update( + id, + commit.clone(), + VaultEntry(meta_aead.clone(), secret_aead.clone()), + )?; + } + Ok(self.vault.update( id, commit, @@ -303,7 +345,10 @@ impl Gatekeeper { } /// Delete a secret and it's meta data from the vault. 
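The gatekeeper now takes an optional mirror, so every mutation applied to the in-memory vault is first written through to a second `VaultAccess` implementation (typically the on-disc vault file). The shape of that write-through pattern, reduced to an illustrative trait with made-up names rather than the crate's API:

```rust
/// Anything that can receive the same mutations as the in-memory copy.
trait WriteThrough {
    fn insert(&mut self, key: String, value: String);
    fn delete(&mut self, key: &str);
}

// Stand-in for a file-backed store.
struct DiscMirror;

impl WriteThrough for DiscMirror {
    fn insert(&mut self, key: String, value: String) {
        println!("mirror: insert {} = {}", key, value);
    }
    fn delete(&mut self, key: &str) {
        println!("mirror: delete {}", key);
    }
}

struct Keeper {
    memory: std::collections::HashMap<String, String>,
    mirror: Option<Box<dyn WriteThrough>>,
}

impl Keeper {
    fn insert(&mut self, key: String, value: String) {
        // Mirror first, then mutate the in-memory copy, so both stay in step.
        if let Some(mirror) = self.mirror.as_mut() {
            mirror.insert(key.clone(), value.clone());
        }
        self.memory.insert(key, value);
    }

    fn delete(&mut self, key: &str) {
        if let Some(mirror) = self.mirror.as_mut() {
            mirror.delete(key);
        }
        self.memory.remove(key);
    }
}

fn main() {
    let mut keeper = Keeper {
        memory: Default::default(),
        mirror: Some(Box::new(DiscMirror)),
    };
    keeper.insert("note".into(), "hello".into());
    keeper.delete("note");
}
```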
- pub fn delete(&mut self, id: &SecretId) -> Result> { + pub fn delete(&mut self, id: &SecretId) -> Result>> { + if let Some(mirror) = self.mirror.as_mut() { + mirror.delete(id)?; + } self.vault.delete(id) } @@ -339,7 +384,7 @@ impl Gatekeeper { if let Some(salt) = self.vault.salt() { let salt = SecretKey::parse_salt(salt)?; let private_key = SecretKey::derive_32(passphrase, &salt)?; - self.private_key = Some(Box::new(private_key)); + self.private_key = Some(private_key); self.vault_meta() } else { Err(Error::VaultNotInit) diff --git a/workspace/core/src/hash.rs b/workspace/core/src/hash.rs new file mode 100644 index 0000000000..45ea0f1f2f --- /dev/null +++ b/workspace/core/src/hash.rs @@ -0,0 +1,35 @@ +//! Hash utility types and functions. +use serde::{Deserialize, Serialize}; +use std::fmt; + +/// Newtype for a 32 byte hash that provides a hexadecimal +/// display implementation. +#[derive( + Default, Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, +)] +pub struct CommitHash(pub [u8; 32]); + +impl AsRef<[u8; 32]> for CommitHash { + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } +} + +impl CommitHash { + /// Get a copy of the underlying bytes for the commit hash. + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } +} + +impl From for [u8; 32] { + fn from(value: CommitHash) -> Self { + value.0 + } +} + +impl fmt::Display for CommitHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex::encode(&self.0)) + } +} diff --git a/workspace/core/src/headers.rs b/workspace/core/src/headers.rs deleted file mode 100644 index 57ba23edaf..0000000000 --- a/workspace/core/src/headers.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Constants for header names. - -/// Constant for the signed message header. -pub const X_SIGNED_MESSAGE: &str = "x-signed-message"; - -/// Constant for the commit hash header. -pub const X_COMMIT_HASH: &str = "x-commit-hash"; - -/// Constant for the commit proof header. -pub const X_COMMIT_PROOF: &str = "x-commit-proof"; diff --git a/workspace/core/src/lib.rs b/workspace/core/src/lib.rs index 6035167b89..ab0a90814a 100644 --- a/workspace/core/src/lib.rs +++ b/workspace/core/src/lib.rs @@ -3,25 +3,23 @@ pub mod address; pub mod commit_tree; +pub mod constants; pub mod crypto; -pub mod diceware; +mod diceware; mod error; pub mod events; -pub mod file_access; -pub mod file_identity; +mod file_access; +mod file_identity; #[cfg(not(target_arch = "wasm32"))] -pub mod file_locks; +mod file_locks; -pub mod gatekeeper; -pub mod headers; - -#[deprecated] -pub mod passphrase; +mod gatekeeper; +mod hash; pub mod secret; pub mod signer; -pub mod timestamp; +mod timestamp; pub mod vault; pub mod wal; @@ -36,7 +34,15 @@ pub use web3_signature; pub use crypto::algorithms::Algorithm; pub use vault::{decode, encode}; +pub use diceware::{generate_passphrase, generate_passphrase_words}; pub use error::Error; +pub use file_access::VaultFileAccess; +pub use file_identity::FileIdentity; +#[cfg(not(target_arch = "wasm32"))] +pub use file_locks::FileLocks; +pub use gatekeeper::Gatekeeper; +pub use hash::CommitHash; +pub use timestamp::Timestamp; /// Result type for the core library. pub type Result = std::result::Result; diff --git a/workspace/core/src/passphrase.rs b/workspace/core/src/passphrase.rs deleted file mode 100644 index 375b882152..0000000000 --- a/workspace/core/src/passphrase.rs +++ /dev/null @@ -1,92 +0,0 @@ -//! Utility to generate bip39 passphrase mnemonics. -//! -//! Word count must be 12, 18 or 24. -//! 
-use crate::{Error, Result}; -use bip39::{Language, Mnemonic}; -use std::str::FromStr; - -/// Generate a bip39 mnemonic in the given language. -pub fn mnemonic_in( - language: Language, - word_count: WordCount, -) -> Result { - let word_count: u16 = word_count.into(); - Ok(Mnemonic::generate_in(language, word_count as usize)?) -} - -/// Generate bip39 passphrase words in the given language. -pub fn words_in(language: Language, word_count: WordCount) -> Result { - Ok(format!("{}", mnemonic_in(language, word_count)?)) -} - -/// Generate bip39 passphrase words in English. -pub fn words(word_count: WordCount) -> Result { - Ok(format!("{}", mnemonic_in(Language::English, word_count)?)) -} - -/// Variants for the number of words supported by -/// the BIP39 mnemonic generation algorithm. -#[derive(Debug)] -pub enum WordCount { - /// Short number of words. - Short(u16), - /// Medium number of words. - Medium(u16), - /// Long number of words. - Long(u16), -} - -impl Default for WordCount { - fn default() -> Self { - Self::Short(12) - } -} - -impl From for u16 { - fn from(value: WordCount) -> u16 { - match value { - WordCount::Short(value) => value, - WordCount::Medium(value) => value, - WordCount::Long(value) => value, - } - } -} - -impl FromStr for WordCount { - type Err = Error; - fn from_str(s: &str) -> std::result::Result { - let value: u16 = s.parse()?; - WordCount::try_from(value) - } -} - -impl TryFrom for WordCount { - type Error = Error; - fn try_from(value: u16) -> std::result::Result { - match value { - 12 => Ok(WordCount::Short(value)), - 18 => Ok(WordCount::Medium(value)), - 24 => Ok(WordCount::Long(value)), - _ => Err(Error::InvalidWordCount), - } - } -} - -#[cfg(test)] -mod tests { - use super::{mnemonic_in, WordCount}; - use anyhow::Result; - use bip39::Language; - - #[test] - fn generate_passphrase() -> Result<()> { - let word_count = 12; - let passphrase = - mnemonic_in(Language::English, WordCount::Short(word_count))?; - let words = format!("{}", passphrase); - let items: Vec<&str> = words.split(' ').collect(); - assert_eq!(word_count as usize, items.len()); - Ok(()) - } -} diff --git a/workspace/core/src/test_utils.rs b/workspace/core/src/test_utils.rs index 6e496e7180..26a0b485f1 100644 --- a/workspace/core/src/test_utils.rs +++ b/workspace/core/src/test_utils.rs @@ -1,10 +1,11 @@ use crate::{ crypto::secret_key::SecretKey, - diceware::generate_passphrase, events::{SyncEvent, WalEvent}, + generate_passphrase, secret::{Secret, SecretId, SecretMeta}, - vault::{encode, CommitHash, Vault, VaultAccess, VaultEntry}, + vault::{encode, Vault, VaultAccess, VaultEntry}, wal::{file::WalFile, WalProvider}, + CommitHash, }; use std::{borrow::Cow, io::Write}; use uuid::Uuid; @@ -16,7 +17,7 @@ use argon2::password_hash::SaltString; pub fn mock_encryption_key() -> Result<(SecretKey, SaltString)> { let salt = SecretKey::generate_salt(); - let (passphrase, _) = generate_passphrase(None)?; + let (passphrase, _) = generate_passphrase()?; let encryption_key = SecretKey::derive_32(&passphrase, &salt)?; Ok((encryption_key, salt)) } diff --git a/workspace/core/src/timestamp.rs b/workspace/core/src/timestamp.rs index 76048e7537..578f366ba3 100644 --- a/workspace/core/src/timestamp.rs +++ b/workspace/core/src/timestamp.rs @@ -8,10 +8,17 @@ use serde_binary::{ }; use std::fmt; -use time::{Duration, OffsetDateTime}; +use filetime::FileTime; -/// Timestamp for the log record. 
-#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +use time::{ + format_description::well_known::{Rfc2822, Rfc3339}, + Duration, OffsetDateTime, UtcOffset, +}; + +use crate::Result; + +/// Timestamp for events and log records. +#[derive(Debug, Clone, Serialize, Deserialize, PartialOrd, Eq, PartialEq)] pub struct Timestamp(OffsetDateTime); impl Default for Timestamp { @@ -19,9 +26,53 @@ impl Default for Timestamp { Self(OffsetDateTime::now_utc()) } } + +impl Timestamp { + /// Convert this timestamp to a RFC2822 formatted string. + pub fn to_rfc2822(&self) -> Result { + Ok(Timestamp::rfc2822(&self.0)?) + } + + /// Convert an offset date time to a RFC2822 formatted string. + fn rfc2822(datetime: &OffsetDateTime) -> Result { + Ok(datetime.format(&Rfc2822)?) + } + + /// Convert this timestamp to a RFC3339 formatted string. + pub fn to_rfc3339(&self) -> Result { + Ok(Timestamp::rfc3339(&self.0)?) + } + + /// Convert an offset date time to a RFC3339 formatted string. + fn rfc3339(datetime: &OffsetDateTime) -> Result { + Ok(datetime.format(&Rfc3339)?) + } +} + impl fmt::Display for Timestamp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) + match UtcOffset::current_local_offset() { + Ok(local_offset) => { + let datetime = self.0.clone(); + datetime.to_offset(local_offset); + match Timestamp::rfc2822(&datetime) { + Ok(value) => { + write!(f, "{}", value) + } + Err(_) => { + write!(f, "{}", datetime) + } + } + } + Err(_) => match self.to_rfc2822() { + Ok(value) => { + write!(f, "{}", value) + } + Err(_) => { + write!(f, "{}", self.0) + } + }, + } } } @@ -45,3 +96,19 @@ impl Decode for Timestamp { Ok(()) } } + +impl From for Timestamp { + fn from(value: OffsetDateTime) -> Self { + Self(value) + } +} + +impl TryFrom for Timestamp { + type Error = crate::Error; + + fn try_from(value: FileTime) -> std::result::Result { + let time = OffsetDateTime::from_unix_timestamp(value.seconds())? + + Duration::nanoseconds(value.nanoseconds() as i64); + Ok(time.into()) + } +} diff --git a/workspace/core/src/vault.rs b/workspace/core/src/vault.rs index e4b7fe90d5..b638b2c29a 100644 --- a/workspace/core/src/vault.rs +++ b/workspace/core/src/vault.rs @@ -13,14 +13,14 @@ use std::{borrow::Cow, collections::HashMap, fmt, path::Path}; use uuid::Uuid; use crate::{ + constants::{VAULT_EXT, VAULT_IDENTITY}, crypto::{ aesgcm256, algorithms::*, secret_key::SecretKey, xchacha20poly1305, AeadPack, }, events::SyncEvent, - file_identity::{FileIdentity, VAULT_IDENTITY}, secret::{SecretId, VaultMeta}, - Error, Result, + CommitHash, Error, FileIdentity, Result, }; /// Vault version identifier. @@ -32,37 +32,6 @@ pub const DEFAULT_VAULT_NAME: &str = "Login"; /// Mime type for vaults. pub const MIME_TYPE_VAULT: &str = "application/sos+vault"; -/// Type to represent the hash of the encrypted content for a secret. -#[derive( - Default, Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, -)] -pub struct CommitHash(pub [u8; 32]); - -impl AsRef<[u8; 32]> for CommitHash { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } -} - -impl CommitHash { - /// Get a copy of the underlying bytes for the commit hash. - pub fn to_bytes(&self) -> [u8; 32] { - self.0 - } -} - -impl From for [u8; 32] { - fn from(value: CommitHash) -> Self { - value.0 - } -} - -impl fmt::Display for CommitHash { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", hex::encode(&self.0)) - } -} - /// Type to represent a secret as an encrypted pair of meta data /// and secret data. 
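The reworked `Timestamp` formats with the `time` crate (RFC 3339 for audit output, RFC 2822 for display, falling back to UTC when no local offset is available) and can be built from a `filetime::FileTime`, which is how snapshot files get their times. A small standalone sketch of those two conversions; `Cargo.toml` is just a convenient file to stat.

```rust
use filetime::FileTime;
use time::{
    format_description::well_known::{Rfc2822, Rfc3339},
    Duration, OffsetDateTime,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Format the current UTC time both ways.
    let now = OffsetDateTime::now_utc();
    println!("rfc3339: {}", now.format(&Rfc3339)?);
    println!("rfc2822: {}", now.format(&Rfc2822)?);

    // Convert a file's modification time into an OffsetDateTime using the
    // same seconds + nanoseconds arithmetic as the TryFrom<FileTime> impl.
    let meta = std::fs::metadata("Cargo.toml")?;
    let ft = FileTime::from_last_modification_time(&meta);
    let modified = OffsetDateTime::from_unix_timestamp(ft.seconds())?
        + Duration::nanoseconds(ft.nanoseconds() as i64);
    println!("modified: {}", modified.format(&Rfc3339)?);
    Ok(())
}
```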
#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -112,10 +81,14 @@ impl Decode for VaultCommit { } } -/// Trait that defines the operations on a vault storage. +/// Trait that defines the operations on an encrypted vault. /// /// The storage may be in-memory, backed by a file on disc or another /// destination for the encrypted bytes. +/// +/// Use `Cow` smart pointers because when we are reading +/// from an in-memory `Vault` we can return references whereas +/// other containers such as file access would return owned data. pub trait VaultAccess { /// Get the vault summary. fn summary(&self) -> Result; @@ -126,6 +99,12 @@ pub trait VaultAccess { /// Set the name of a vault. fn set_vault_name(&mut self, name: String) -> Result>; + /// Set the vault meta data. + fn set_vault_meta( + &mut self, + meta_data: Option, + ) -> Result>; + /// Add an encrypted secret to the vault. fn create( &mut self, @@ -133,11 +112,19 @@ pub trait VaultAccess { secret: VaultEntry, ) -> Result>; - /// Get an encrypted secret from the vault. + /// Insert an encrypted secret to the vault with the given id. /// - /// Use a `Cow` smart pointer because when we are reading - /// from an in-memory `Vault` we can return references whereas - /// other containers such as file access would return owned data. + /// Used internally to support consistent identifiers when + /// mirroring in the `Gatekeeper` implementation. + #[doc(hidden)] + fn insert( + &mut self, + id: SecretId, + commit: CommitHash, + secret: VaultEntry, + ) -> Result>; + + /// Get an encrypted secret from the vault. fn read<'a>( &'a self, id: &SecretId, @@ -278,7 +265,6 @@ impl Decode for Summary { /// File header, identifier and version information #[derive(Debug, Eq, PartialEq)] pub struct Header { - identity: FileIdentity, summary: Summary, meta: Option, auth: Auth, @@ -288,7 +274,6 @@ impl Header { /// Create a new header. pub fn new(id: Uuid, name: String, algorithm: Algorithm) -> Self { Self { - identity: FileIdentity(VAULT_IDENTITY), summary: Summary::new(id, name, algorithm), meta: None, auth: Default::default(), @@ -366,7 +351,6 @@ impl Header { impl Default for Header { fn default() -> Self { Self { - identity: FileIdentity(VAULT_IDENTITY), summary: Default::default(), meta: None, auth: Default::default(), @@ -382,7 +366,8 @@ impl fmt::Display for Header { impl Encode for Header { fn encode(&self, ser: &mut Serializer) -> BinaryResult<()> { - self.identity.encode(&mut *ser)?; + FileIdentity::write_identity(&mut *ser, &VAULT_IDENTITY) + .map_err(Box::from)?; let size_pos = ser.writer.tell()?; ser.writer.write_u32(0)?; @@ -409,7 +394,8 @@ impl Encode for Header { impl Decode for Header { fn decode(&mut self, de: &mut Deserializer) -> BinaryResult<()> { - self.identity.decode(&mut *de)?; + FileIdentity::read_identity(&mut *de, &VAULT_IDENTITY) + .map_err(Box::from)?; // Read in the header length let _ = de.reader.read_u32()?; @@ -571,7 +557,7 @@ impl Vault { } /// Insert a secret into this vault. - pub(crate) fn insert(&mut self, id: SecretId, entry: VaultCommit) { + pub(crate) fn insert_entry(&mut self, id: SecretId, entry: VaultCommit) { self.contents.data.insert(id, entry); } @@ -636,7 +622,7 @@ impl Vault { /// The file extension for vault files. pub fn extension() -> &'static str { - "vault" + VAULT_EXT } /// Get the summary for this vault. 
@@ -752,12 +738,30 @@ impl VaultAccess for Vault {
         Ok(SyncEvent::SetVaultName(Cow::Owned(name)))
     }
 
+    fn set_vault_meta(
+        &mut self,
+        meta_data: Option<AeadPack>,
+    ) -> Result<SyncEvent<'_>> {
+        self.header.set_meta(meta_data);
+        let meta = self.header.meta().map(|m| m.clone());
+        Ok(SyncEvent::SetVaultMeta(Cow::Owned(meta)))
+    }
+
     fn create(
         &mut self,
         commit: CommitHash,
         secret: VaultEntry,
     ) -> Result<SyncEvent<'_>> {
         let id = Uuid::new_v4();
+        self.insert(id, commit, secret)
+    }
+
+    fn insert(
+        &mut self,
+        id: SecretId,
+        commit: CommitHash,
+        secret: VaultEntry,
+    ) -> Result<SyncEvent<'_>> {
         let value = self
             .contents
             .data
diff --git a/workspace/core/src/wal/file.rs b/workspace/core/src/wal/file.rs
index 998e4e199e..d3390ce8bd 100644
--- a/workspace/core/src/wal/file.rs
+++ b/workspace/core/src/wal/file.rs
@@ -14,11 +14,11 @@
 //!
 use crate::{
     commit_tree::{hash, CommitTree},
+    constants::{WAL_EXT, WAL_IDENTITY},
+    encode,
     events::WalEvent,
-    file_identity::{FileIdentity, WAL_IDENTITY},
     timestamp::Timestamp,
-    vault::{encode, CommitHash},
-    Error, Result,
+    CommitHash, Error, FileIdentity, Result,
 };
 use std::{
     fs::{File, OpenOptions},
@@ -105,13 +105,17 @@ impl WalFile {
         })
     }
 
+    /// Get the path to this WAL file.
+    pub fn path(&self) -> &PathBuf {
+        &self.file_path
+    }
+
     /// Create the write ahead log file.
     fn create<P: AsRef<Path>>(path: P) -> Result<File> {
         let exists = path.as_ref().exists();
 
         if !exists {
-            let file = File::create(path.as_ref())?;
-            drop(file);
+            File::create(path.as_ref())?;
         }
 
         let mut file = OpenOptions::new()
@@ -121,9 +125,7 @@ impl WalFile {
 
         let size = file.metadata()?.len();
         if size == 0 {
-            let identity = FileIdentity(WAL_IDENTITY);
-            let buffer = encode(&identity)?;
-            file.write_all(&buffer)?;
+            file.write_all(&WAL_IDENTITY)?;
         }
         Ok(file)
     }
@@ -141,7 +143,7 @@ impl WalFile {
 
     /// The file extension for WAL files.
     pub fn extension() -> &'static str {
-        "wal"
+        WAL_EXT
     }
 }
 
@@ -159,7 +161,7 @@ impl WalProvider for WalFile {
             file.seek(SeekFrom::Start(start as u64))?;
             let mut buffer = vec![0; end - start];
             file.read_exact(buffer.as_mut_slice())?;
-            partial.extend_from_slice(buffer.as_slice());
+            partial.append(&mut buffer);
             Ok(partial)
         } else {
             Ok(partial)
@@ -180,7 +182,7 @@ impl WalProvider for WalFile {
         for event in events {
             let (commit, record) = self.encode_event(event)?;
             commits.push(commit);
-            buffer.extend_from_slice(&encode(&record)?);
+            buffer.append(&mut encode(&record)?);
         }
 
         let mut hashes =
@@ -225,8 +227,8 @@ impl WalProvider for WalFile {
         Ok(commit)
     }
 
-    fn event_data(&self, item: Self::Item) -> Result<WalEvent<'_>> {
-        let value = item.value;
+    fn event_data(&self, item: &Self::Item) -> Result<WalEvent<'_>> {
+        let value = &item.value;
 
         // Use a different file handle as the owned `file` should
         // be used exclusively for appending
@@ -250,6 +252,7 @@ impl WalProvider for WalFile {
             let record = record?;
             commits.push(record.commit());
         }
+        self.tree = CommitTree::new();
         self.tree.append(&mut commits);
         self.tree.commit();
         Ok(())
diff --git a/workspace/core/src/wal/memory.rs b/workspace/core/src/wal/memory.rs
index 1b90ea77b1..57250182a6 100644
--- a/workspace/core/src/wal/memory.rs
+++ b/workspace/core/src/wal/memory.rs
@@ -14,8 +14,7 @@ use crate::{
     decode, encode,
     events::WalEvent,
     timestamp::Timestamp,
-    vault::CommitHash,
-    Result,
+    CommitHash, Result,
 };
 use std::ops::Range;
 
@@ -129,7 +128,7 @@ impl WalProvider for WalMemory {
         Ok(commit)
     }
 
-    fn event_data(&self, item: Self::Item) -> Result<WalEvent<'_>> {
+    fn event_data(&self, item: &Self::Item) -> Result<WalEvent<'_>> {
         let event: WalEvent = decode(&item.1 .2)?;
         Ok(event)
     }
diff --git a/workspace/core/src/wal/mod.rs b/workspace/core/src/wal/mod.rs
index 2a667b1f55..a9aa43b160 100644
--- a/workspace/core/src/wal/mod.rs
+++ b/workspace/core/src/wal/mod.rs
@@ -1,7 +1,7 @@
 //! Write ahead log types and traits.
 use crate::{
     commit_tree::CommitTree, events::WalEvent, timestamp::Timestamp,
-    vault::CommitHash, Result,
+    CommitHash, Result,
 };
 use std::ops::Range;
 
@@ -13,6 +13,7 @@ use serde_binary::{
 pub mod file;
 pub mod memory;
 pub mod reducer;
+pub mod snapshot;
 
 /// Trait for implementations that provide access to a write-ahead log (WAL).
 pub trait WalProvider {
@@ -54,7 +55,7 @@ pub trait WalProvider {
     fn append_event(&mut self, event: WalEvent<'_>) -> Result<CommitHash>;
 
     /// Read the event data from an item.
-    fn event_data(&self, item: Self::Item) -> Result<WalEvent<'_>>;
+    fn event_data(&self, item: &Self::Item) -> Result<WalEvent<'_>>;
 
     /// Get the commit tree for the log records.
     fn tree(&self) -> &CommitTree;
@@ -159,7 +160,8 @@ mod test {
         commit_tree::{hash, Comparison},
         events::WalEvent,
         secret::SecretId,
-        vault::{encode, CommitHash, Vault, VaultCommit, VaultEntry},
+        vault::{encode, Vault, VaultCommit, VaultEntry},
+        CommitHash,
     };
 
     fn mock_secret<'a>() -> Result<(SecretId, Cow<'a, VaultCommit>)> {
@@ -211,7 +213,7 @@ mod test {
         let mut client = WalMemory::new();
         for record in server.iter()? {
             let record = record?;
-            let event = server.event_data(record)?;
+            let event = server.event_data(&record)?;
             client.append_event(event)?;
         }
 
@@ -230,12 +232,12 @@ mod test {
         // Add another event to the server from another client.
         server.append_event(WalEvent::DeleteSecret(id))?;
 
-        /// Check that the server contains the client proof
+        // Check that the server contains the client proof
         let proof = client.tree().head()?;
         let comparison = server.tree().compare(proof)?;
 
-        let matched = if let Comparison::Contains(index, _) = comparison {
-            index == 1
+        let matched = if let Comparison::Contains(indices, _) = comparison {
+            indices == vec![1]
         } else {
             false
         };
@@ -269,10 +271,25 @@ mod test {
 
         let record = server.iter()?.next_back().unwrap()?;
         let proof = client.tree().head()?;
+
+        /*
+        println!("client proof root {}", hex::encode(&proof.0));
+        for leaf in client.tree().leaves().unwrap() {
+            println!("client leaf: {}", hex::encode(&leaf));
+        }
+        println!("server proof root {}",
+            hex::encode(server.tree().root().unwrap()));
+        for leaf in server.tree().leaves().unwrap() {
+            println!("server leaf: {}", hex::encode(&leaf));
+        }
+        */
+
         let comparison = server.tree().compare(proof)?;
-        if let Comparison::Contains(_, leaf) = comparison {
-            if let Some(records) = server.diff(leaf)? {
+        if let Comparison::Contains(indices, leaves) = comparison {
+            assert_eq!(vec![1], indices);
+            let leaf = leaves.get(0).unwrap();
+            if let Some(records) = server.diff(*leaf)? {
                 assert_eq!(1, records.len());
                 assert_eq!(&record, records.get(0).unwrap());
             } else {
diff --git a/workspace/core/src/wal/reducer.rs b/workspace/core/src/wal/reducer.rs
index 8525419dea..8831e33d26 100644
--- a/workspace/core/src/wal/reducer.rs
+++ b/workspace/core/src/wal/reducer.rs
@@ -45,21 +45,18 @@ impl<'a> WalReducer<'a> {
         let mut it = wal.iter()?;
         if let Some(first) = it.next() {
             let log = first?;
-            let event = wal.event_data(log)?;
+            let event = wal.event_data(&log)?;
 
             if let WalEvent::CreateVault(vault) = event {
                 self.vault = Some(vault.clone());
                 for record in it {
                     let log = record?;
-                    let event = wal.event_data(log)?;
+                    let event = wal.event_data(&log)?;
                     match event {
                         WalEvent::Noop => unreachable!(),
                         WalEvent::CreateVault(_) => {
                             return Err(Error::WalCreateEventOnlyFirst)
                         }
-                        WalEvent::UpdateVault(vault) => {
-                            self.vault = Some(vault.clone());
-                        }
                         WalEvent::SetVaultName(name) => {
                             self.vault_name = Some(name.clone());
                         }
@@ -138,7 +135,7 @@ impl<'a> WalReducer<'a> {
 
             for (id, entry) in self.secrets {
                 let entry = entry.into_owned();
-                vault.insert(id, entry);
+                vault.insert_entry(id, entry);
             }
             Ok(vault)
         } else {
@@ -154,8 +151,9 @@ mod test {
         crypto::secret_key::SecretKey,
         secret::{Secret, SecretId, SecretMeta},
         test_utils::*,
-        vault::{decode, CommitHash, VaultAccess, VaultCommit, VaultEntry},
+        vault::{decode, VaultAccess, VaultCommit, VaultEntry},
         wal::file::WalFile,
+        CommitHash,
     };
     use anyhow::Result;
     use tempfile::NamedTempFile;
diff --git a/workspace/core/src/wal/snapshot.rs b/workspace/core/src/wal/snapshot.rs
new file mode 100644
index 0000000000..ab7696998d
--- /dev/null
+++ b/workspace/core/src/wal/snapshot.rs
@@ -0,0 +1,126 @@
+//! Snapshot manager for creating WAL file snapshots.
+use crate::{CommitHash, Error, Result, Timestamp};
+use filetime::FileTime;
+use std::{
+    fs::File,
+    path::{Path, PathBuf},
+};
+use uuid::Uuid;
+
+/// Directory used to store snapshots.
+const SNAPSHOTS_DIR: &str = "snapshots";
+
+/// Snapshot represented by its timestamp, derived from the file
+/// modification time, and the commit hash of the WAL root.
+pub struct SnapShot(pub PathBuf, pub Timestamp, pub CommitHash, pub u64);
+
+/// Manages a collection of WAL snapshots.
+pub struct SnapShotManager {
+    snapshots_dir: PathBuf,
+}
+
+impl SnapShotManager {
+    /// Create a new snapshot manager.
+    ///
+    /// The `base_dir` must be a directory and already exist; this
+    /// function will create a nested directory called `snapshots` used
+    /// to store the snapshot files.
+    pub fn new<P: AsRef<Path>>(base_dir: P) -> Result<Self> {
+        let snapshots_dir = base_dir.as_ref().join(SNAPSHOTS_DIR);
+        if !snapshots_dir.exists() {
+            std::fs::create_dir(&snapshots_dir)?;
+        }
+
+        if !snapshots_dir.is_dir() {
+            return Err(Error::NotDirectory(snapshots_dir));
+        }
+
+        Ok(Self { snapshots_dir })
+    }
+
+    /// Create a snapshot from a WAL file.
+    ///
+    /// If a snapshot already exists with the current root hash
+    /// then this returns `false` to indicate no snapshot was
+    /// created.
+    pub fn create<P: AsRef<Path>>(
+        &self,
+        vault_id: &Uuid,
+        wal: P,
+        root_hash: [u8; 32],
+    ) -> Result<(SnapShot, bool)> {
+        let root_id = hex::encode(root_hash);
+        let snapshot_parent = self.snapshots_dir.join(vault_id.to_string());
+
+        if !snapshot_parent.exists() {
+            std::fs::create_dir(&snapshot_parent)?;
+        }
+
+        let snapshot_path = snapshot_parent.join(&root_id);
+
+        tracing::debug!(
+            vault_id = %vault_id,
+            root_hash = ?root_id,
+            "WAL snapshot id");
+
+        // Assume the integrity of any existing snapshot file
+        let created = if !snapshot_path.exists() {
+            let mut wal_file = File::open(wal.as_ref())?;
+            let mut snapshot_file = File::create(&snapshot_path)?;
+            tracing::debug!(
+                wal = ?wal.as_ref(),
+                snapshot = ?snapshot_path,
+                "WAL snapshot path");
+            std::io::copy(&mut wal_file, &mut snapshot_file)?;
+            true
+        } else {
+            false
+        };
+
+        let meta = snapshot_path.metadata()?;
+        let mtime = FileTime::from_last_modification_time(&meta);
+
+        let snapshot = SnapShot(
+            snapshot_path,
+            mtime.try_into()?,
+            CommitHash(root_hash),
+            meta.len(),
+        );
+
+        Ok((snapshot, created))
+    }
+
+    /// List snapshots sorted by file modification time.
+    pub fn list(&self, vault_id: &Uuid) -> Result<Vec<SnapShot>> {
+        let mut snapshots = Vec::new();
+        let snapshot_parent = self.snapshots_dir.join(vault_id.to_string());
+        if snapshot_parent.is_dir() {
+            for entry in std::fs::read_dir(&snapshot_parent)? {
+                let entry = entry?;
+                let name = entry.file_name();
+                let name = name.to_string_lossy();
+                // A 32 byte hash as a hex-encoded string is 64 bytes
+                if name.len() == 64 {
+                    let root_hash = hex::decode(name.as_bytes())?;
+                    let root_hash: [u8; 32] =
+                        root_hash.as_slice().try_into()?;
+                    let path = entry.path().to_path_buf();
+                    let meta = entry.metadata()?;
+                    let mtime = FileTime::from_last_modification_time(&meta);
+                    snapshots.push(SnapShot(
+                        path,
+                        mtime.try_into()?,
+                        CommitHash(root_hash),
+                        meta.len(),
+                    ));
+                }
+            }
+        }
+
+        // Sort by file modification time
+        snapshots.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
+
+        Ok(snapshots)
+    }
+}
diff --git a/workspace/readline/src/input.rs b/workspace/readline/src/input.rs
index a2178b16aa..838a2ed6b5 100644
--- a/workspace/readline/src/input.rs
+++ b/workspace/readline/src/input.rs
@@ -167,3 +167,25 @@ pub fn read_flag(prompt: Option<&str>) -> Result<bool> {
         Err(e) => Err(Error::Readline(e)),
     }
 }
+
+/// Represents a choice message and associated type.
+pub struct Choice<'a, T>(pub &'a str, pub T);
+
+/// Choose from a list of options.
+pub fn choose<'a, T>(
+    prompt: Option<&str>,
+    options: &'a [Choice<T>],
+) -> Result<Option<&'a T>> {
+    for (index, option) in options.iter().enumerate() {
+        println!("{}) {}", index + 1, option.0);
+    }
+
+    let value = read_line(prompt)?;
+    match value.parse::<usize>() {
+        Ok(num) => {
+            let num = if num > 0 { num - 1 } else { num };
+            Ok(options.get(num).map(|result| &result.1))
+        }
+        Err(_) => Ok(None),
+    }
+}
diff --git a/workspace/server/public/index.0c4d43b9.js b/workspace/server/public/index.0c4d43b9.js
new file mode 100644
index 0000000000..2dcf4a6d02
--- /dev/null
+++ b/workspace/server/public/index.0c4d43b9.js
@@ -0,0 +1,120 @@
+function e(e,t,n,r){Object.defineProperty(e,t,{get:n,set:r,enumerable:!0,configurable:!0})}function t(e){Object.defineProperty(e,"__esModule",{value:!0,configurable:!0})}function n(e){return e&&e.__esModule?e.default:e}function r(e,t){return Object.keys(t).forEach((function(n){"default"===n||"__esModule"===n||e.hasOwnProperty(n)||Object.defineProperty(e,n,{enumerable:!0,get:function(){return t[n]}})})),e}var a="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof self?self:"undefined"!=typeof window?window:"undefined"!=typeof global?global:{},o={},i={},l=a.parcelRequire809c;null==l&&((l=function(e){if(e in o)return o[e].exports;if(e in i){var t=i[e];delete i[e];var n={id:e,exports:{}};return o[e]=n,t.call(n.exports,n,n.exports),n.exports}var r=new Error("Cannot find module '"+e+"'");throw r.code="MODULE_NOT_FOUND",r}).register=function(e,t){i[e]=t},a.parcelRequire809c=l),l.register("27Lyk",(function(t,n){var r,a;e(t.exports,"register",(()=>r),(e=>r=e)),e(t.exports,"resolve",(()=>a),(e=>a=e));var o={};r=function(e){for(var t=Object.keys(e),n=0;nr),(e=>r=e)),e(t.exports,"jsx",(()=>a),(e=>a=e)),e(t.exports,"jsxs",(()=>o),(e=>o=e));var i=l("acw62"),s=Symbol.for("react.element"),u=Symbol.for("react.fragment"),c=Object.prototype.hasOwnProperty,d=i.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,f={key:!0,ref:!0,__self:!0,__source:!0};function p(e,t,n){var r,a={},o=null,i=null;for(r in void 0!==n&&(o=""+n),void 0!==t.key&&(o=""+t.key),void 0!==t.ref&&(i=t.ref),t)c.call(t,r)&&!f.hasOwnProperty(r)&&(a[r]=t[r]);if(e&&e.defaultProps)for(r in t=e.defaultProps)void 0===a[r]&&(a[r]=t[r]);return{$$typeof:s,type:e,key:o,ref:i,props:a,_owner:d.current}}r=u,a=p,o=p})),l.register("acw62",(function(e,t){e.exports=l("2pUnB")})),l.register("2pUnB",(function(t,n){var
r,a,o,i,l,s,u,c,d,f,p,m,h,v,g,b,y,x,w,S,k,C,E,j,M,R,P,O,T,z,I,N,A,L,$;e(t.exports,"Children",(()=>r),(e=>r=e)),e(t.exports,"Component",(()=>a),(e=>a=e)),e(t.exports,"Fragment",(()=>o),(e=>o=e)),e(t.exports,"Profiler",(()=>i),(e=>i=e)),e(t.exports,"PureComponent",(()=>l),(e=>l=e)),e(t.exports,"StrictMode",(()=>s),(e=>s=e)),e(t.exports,"Suspense",(()=>u),(e=>u=e)),e(t.exports,"__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED",(()=>c),(e=>c=e)),e(t.exports,"cloneElement",(()=>d),(e=>d=e)),e(t.exports,"createContext",(()=>f),(e=>f=e)),e(t.exports,"createElement",(()=>p),(e=>p=e)),e(t.exports,"createFactory",(()=>m),(e=>m=e)),e(t.exports,"createRef",(()=>h),(e=>h=e)),e(t.exports,"forwardRef",(()=>v),(e=>v=e)),e(t.exports,"isValidElement",(()=>g),(e=>g=e)),e(t.exports,"lazy",(()=>b),(e=>b=e)),e(t.exports,"memo",(()=>y),(e=>y=e)),e(t.exports,"startTransition",(()=>x),(e=>x=e)),e(t.exports,"unstable_act",(()=>w),(e=>w=e)),e(t.exports,"useCallback",(()=>S),(e=>S=e)),e(t.exports,"useContext",(()=>k),(e=>k=e)),e(t.exports,"useDebugValue",(()=>C),(e=>C=e)),e(t.exports,"useDeferredValue",(()=>E),(e=>E=e)),e(t.exports,"useEffect",(()=>j),(e=>j=e)),e(t.exports,"useId",(()=>M),(e=>M=e)),e(t.exports,"useImperativeHandle",(()=>R),(e=>R=e)),e(t.exports,"useInsertionEffect",(()=>P),(e=>P=e)),e(t.exports,"useLayoutEffect",(()=>O),(e=>O=e)),e(t.exports,"useMemo",(()=>T),(e=>T=e)),e(t.exports,"useReducer",(()=>z),(e=>z=e)),e(t.exports,"useRef",(()=>I),(e=>I=e)),e(t.exports,"useState",(()=>N),(e=>N=e)),e(t.exports,"useSyncExternalStore",(()=>A),(e=>A=e)),e(t.exports,"useTransition",(()=>L),(e=>L=e)),e(t.exports,"version",(()=>$),(e=>$=e));var F=Symbol.for("react.element"),_=Symbol.for("react.portal"),B=Symbol.for("react.fragment"),D=Symbol.for("react.strict_mode"),W=Symbol.for("react.profiler"),V=Symbol.for("react.provider"),H=Symbol.for("react.context"),G=Symbol.for("react.forward_ref"),U=Symbol.for("react.suspense"),q=Symbol.for("react.memo"),K=Symbol.for("react.lazy"),Y=Symbol.iterator;var Q={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},X=Object.assign,J={};function Z(e,t,n){this.props=e,this.context=t,this.refs=J,this.updater=n||Q}function ee(){}function te(e,t,n){this.props=e,this.context=t,this.refs=J,this.updater=n||Q}Z.prototype.isReactComponent={},Z.prototype.setState=function(e,t){if("object"!=typeof e&&"function"!=typeof e&&null!=e)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")},Z.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},ee.prototype=Z.prototype;var ne=te.prototype=new ee;ne.constructor=te,X(ne,Z.prototype),ne.isPureReactComponent=!0;var re=Array.isArray,ae=Object.prototype.hasOwnProperty,oe={current:null},ie={key:!0,ref:!0,__self:!0,__source:!0};function le(e,t,n){var r,a={},o=null,i=null;if(null!=t)for(r in void 0!==t.ref&&(i=t.ref),void 0!==t.key&&(o=""+t.key),t)ae.call(t,r)&&!ie.hasOwnProperty(r)&&(a[r]=t[r]);var l=arguments.length-2;if(1===l)a.children=n;else 
if(1r),(e=>r=e)),e(t.exports,"createPortal",(()=>a),(e=>a=e)),e(t.exports,"createRoot",(()=>o),(e=>o=e)),e(t.exports,"findDOMNode",(()=>i),(e=>i=e)),e(t.exports,"flushSync",(()=>s),(e=>s=e)),e(t.exports,"hydrate",(()=>u),(e=>u=e)),e(t.exports,"hydrateRoot",(()=>c),(e=>c=e)),e(t.exports,"render",(()=>d),(e=>d=e)),e(t.exports,"unmountComponentAtNode",(()=>f),(e=>f=e)),e(t.exports,"unstable_batchedUpdates",(()=>p),(e=>p=e)),e(t.exports,"unstable_renderSubtreeIntoContainer",(()=>m),(e=>m=e)),e(t.exports,"version",(()=>h),(e=>h=e));var v=l("acw62"),g=l("fO90s");function b(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n