//! [`VaultKeyStorage`] using S3-compatible storage.
//!
//! This is the recommended method for securing your BonsaiDb database. There
//! are many ways to acquire secure, inexpensive S3-compatible storage, such as
//! Backblaze B2.
//!
//! Do not configure your bucket with public access. You should only allow
//! access from the IP addresses that your BonsaiDb server(s) are hosted on,
//! or only allow authenticated access.
//!
//! To use this, specify the `vault_key_storage` configuration parameter:
//!
//! ```rust
//! # use bonsaidb_keystorage_s3::S3VaultKeyStorage;
//! # use aws_sdk_s3::Endpoint;
//! # use bonsaidb_core::{document::KeyId, test_util::TestDirectory};
//! # use bonsaidb_local::config::{StorageConfiguration, Builder};
//! # use http::Uri;
//! #
//! # async fn test() {
//! let directory = TestDirectory::new("bonsaidb-keystorage-s3-basic");
//! let configuration = StorageConfiguration::new(&directory)
//!     .vault_key_storage(
//!         S3VaultKeyStorage::new("bucket_name").endpoint(Endpoint::immutable(
//!             Uri::try_from("https://s3.us-west-001.backblazeb2.com").unwrap(),
//!         )),
//!     )
//!     .default_encryption_key(KeyId::Master);
//! # }
//! ```
//!
//! The API calls are performed by the [`aws-sdk-s3`](aws_sdk_s3) crate.

            
34
#![forbid(unsafe_code)]
35
#![warn(
36
    clippy::cargo,
37
    missing_docs,
38
    // clippy::missing_docs_in_private_items,
39
    clippy::nursery,
40
    clippy::pedantic,
41
    future_incompatible,
42
    rust_2018_idioms,
43
)]
44
#![allow(
45
    clippy::missing_errors_doc, // TODO clippy::missing_errors_doc
46
    clippy::missing_panics_doc, // TODO clippy::missing_panics_doc
47
    clippy::option_if_let_else,
48
    clippy::module_name_repetitions,
49
)]
50

            
51
use std::{fmt::Display, future::Future};
52

            
53
use async_trait::async_trait;
54
use aws_config::meta::region::RegionProviderChain;
55
pub use aws_sdk_s3;
56
use aws_sdk_s3::{types::ByteStream, Client, Endpoint, Region};
57
use bonsaidb_local::{
58
    vault::{KeyPair, VaultKeyStorage},
59
    StorageId,
60
};
61
pub use http;
62
use tokio::runtime::{self, Handle, Runtime};
63

            
64
/// S3-compatible [`VaultKeyStorage`] implementor.
#[derive(Default, Debug)]
#[must_use]
pub struct S3VaultKeyStorage {
    // Runtime (or handle to an existing runtime) used to drive the async S3
    // client from the blocking `VaultKeyStorage` trait methods.
    runtime: Tokio,
    // Name of the bucket that vault keys are stored in.
    bucket: String,
    /// The S3 endpoint to use. If not specified, the endpoint will be
    /// determined automatically. This field can be used to support non-AWS S3
    /// providers.
    pub endpoint: Option<Endpoint>,
    /// The AWS region to use. If not specified, the region will be determined
    /// by the aws sdk.
    pub region: Option<Region>,
    /// The path prefix for keys to be stored within.
    pub path: String,
}
80

            
81
// Either a runtime owned by this crate or a handle to one owned by the
// caller. This lets `S3VaultKeyStorage` block on async S3 calls whether or
// not it was created inside an existing Tokio runtime.
#[derive(Debug)]
enum Tokio {
    // A private runtime built when no ambient runtime was available.
    Runtime(Runtime),
    // A handle to a runtime owned elsewhere.
    Handle(Handle),
}
86

            
87
impl Default for Tokio {
88
6
    fn default() -> Self {
89
6
        Handle::try_current().map_or_else(
90
6
            |_| {
91
3
                Self::Runtime(
92
3
                    runtime::Builder::new_current_thread()
93
3
                        .enable_all()
94
3
                        .build()
95
3
                        .unwrap(),
96
3
                )
97
6
            },
98
6
            Self::Handle,
99
6
        )
100
6
    }
101
}
102

            
103
impl Tokio {
104
8
    pub fn block_on<F: Future<Output = R>, R>(&self, future: F) -> R {
105
8
        match self {
106
4
            Tokio::Runtime(rt) => rt.block_on(future),
107
4
            Tokio::Handle(rt) => rt.block_on(future),
108
        }
109
8
    }
110
}
111

            
112
impl S3VaultKeyStorage {
113
    /// Creates a new key storage instance for `bucket`. This instance will use
114
    /// the currently available Tokio runtime or create one if none is
115
    /// available.
116
    pub fn new(bucket: impl Display) -> Self {
117
        Self::new_with_runtime(bucket, tokio::runtime::Handle::current())
118
    }
119

            
120
    /// Creates a new key storage instance for `bucket`, which performs its
121
    /// networking operations on `runtime`.
122
    pub fn new_with_runtime(bucket: impl Display, runtime: tokio::runtime::Handle) -> Self {
123
        Self {
124
            bucket: bucket.to_string(),
125
            runtime: Tokio::Handle(runtime),
126
            ..Self::default()
127
        }
128
    }
129

            
130
    /// Sets the path prefix for vault keys to be stored within.
131
2
    pub fn path(mut self, prefix: impl Display) -> Self {
132
2
        self.path = prefix.to_string();
133
2
        self
134
2
    }
135

            
136
    /// Sets the endpoint to use. See [`Self::endpoint`] for more information.
137
    #[allow(clippy::missing_const_for_fn)] // destructors
138
    pub fn endpoint(mut self, endpoint: Endpoint) -> Self {
139
        self.endpoint = Some(endpoint);
140
        self
141
    }
142

            
143
8
    fn path_for_id(&self, storage_id: StorageId) -> String {
144
8
        let mut path = self.path.clone();
145
8
        if !path.is_empty() && !path.ends_with('/') {
146
2
            path.push('/');
147
6
        }
148
8
        path.push_str(&storage_id.to_string());
149
8
        path
150
8
    }
151

            
152
8
    async fn client(&self) -> aws_sdk_s3::Client {
153
8
        let region_provider = RegionProviderChain::first_try(self.region.clone())
154
8
            .or_default_provider()
155
8
            .or_else(Region::new("us-east-1"));
156
60
        let config = aws_config::from_env().load().await;
157
8
        if let Some(endpoint) = self.endpoint.clone() {
158
            Client::from_conf(
159
8
                aws_sdk_s3::Config::builder()
160
8
                    .endpoint_resolver(endpoint)
161
28
                    .region(region_provider.region().await)
162
8
                    .credentials_provider(config.credentials_provider().unwrap().clone())
163
8
                    .build(),
164
            )
165
        } else {
166
            Client::new(&config)
167
        }
168
8
    }
169
}
170

            
171
#[async_trait]
impl VaultKeyStorage for S3VaultKeyStorage {
    type Error = anyhow::Error;

    // Serializes `key` and uploads it to the bucket under
    // `path_for_id(storage_id)`. The trait method is synchronous, so the
    // async S3 call is driven by the stored runtime/handle.
    fn set_vault_key_for(&self, storage_id: StorageId, key: KeyPair) -> Result<(), Self::Error> {
        self.runtime.block_on(async {
            let client = self.client().await;
            let key = key.to_bytes()?;
            client
                .put_object()
                .bucket(&self.bucket)
                .key(self.path_for_id(storage_id))
                .body(ByteStream::from(key.to_vec()))
                .send()
                .await?;
            Ok(())
        })
    }

    // Downloads and deserializes the key stored for `storage_id`, returning
    // `Ok(None)` when no such object exists in the bucket.
    fn vault_key_for(&self, storage_id: StorageId) -> Result<Option<KeyPair>, Self::Error> {
        self.runtime.block_on(async {
            let client = self.client().await;
            match client
                .get_object()
                .bucket(&self.bucket)
                .key(self.path_for_id(storage_id))
                .send()
                .await
            {
                Ok(response) => {
                    let bytes = response.body.collect().await?.into_bytes();
                    // `KeyPair::from_bytes` errors aren't `Send`/`'static`
                    // compatible with anyhow directly, so stringify them.
                    let key = KeyPair::from_bytes(&bytes)
                        .map_err(|err| anyhow::anyhow!(err.to_string()))?;
                    Ok(Some(key))
                }
                // A missing object is not an error: it means no vault key has
                // been stored for this storage id yet.
                Err(aws_smithy_client::SdkError::ServiceError {
                    err:
                        aws_sdk_s3::error::GetObjectError {
                            kind: aws_sdk_s3::error::GetObjectErrorKind::NoSuchKey(_),
                            ..
                        },
                    ..
                }) => Ok(None),
                // Any other SDK failure (auth, network, permissions) is a
                // genuine error.
                Err(err) => Err(anyhow::anyhow!(err)),
            }
        })
    }
}
219

            
220
// Reads the environment variable `$name`. When it is unset or empty, logs an
// error and `return`s from the enclosing test function, so the S3 integration
// tests are skipped (rather than failed) when credentials aren't configured.
#[cfg(test)]
macro_rules! env_var {
    ($name:expr) => {{
        match std::env::var($name) {
            Ok(value) if !value.is_empty() => value,
            _ => {
                log::error!(
                    "Ignoring basic_test because of missing environment variable: {}",
                    $name
                );
                return;
            }
        }
    }};
}
235

            
236
// End-to-end test of the async storage path: creates an encrypted database
// whose vault key lives in a real S3 bucket, verifies it can be reopened, and
// verifies that a configuration pointing at a different (empty) key prefix
// cannot decrypt the storage. Requires S3_BUCKET and S3_ENDPOINT; skips
// otherwise.
#[cfg(test)]
#[tokio::test]
async fn basic_test() {
    use bonsaidb_core::{
        connection::AsyncStorageConnection,
        document::KeyId,
        schema::SerializedCollection,
        test_util::{Basic, BasicSchema, TestDirectory},
    };
    use bonsaidb_local::{
        config::{Builder, StorageConfiguration},
        AsyncStorage,
    };
    use http::Uri;
    // Load a `.env` file when present; errors are ignored so plain
    // environment variables also work.
    drop(dotenv::dotenv());

    let bucket = env_var!("S3_BUCKET");
    let endpoint = env_var!("S3_ENDPOINT");

    let directory = TestDirectory::new("bonsaidb-keystorage-s3-basic");

    // Builds a storage configuration whose vault key storage is the S3
    // bucket, optionally under a path prefix.
    let configuration = |prefix| {
        let mut vault_key_storage = S3VaultKeyStorage {
            bucket: bucket.clone(),
            endpoint: Some(Endpoint::immutable(Uri::try_from(&endpoint).unwrap())),
            ..S3VaultKeyStorage::default()
        };
        if let Some(prefix) = prefix {
            vault_key_storage = vault_key_storage.path(prefix);
        }

        StorageConfiguration::new(&directory)
            .vault_key_storage(vault_key_storage)
            .default_encryption_key(KeyId::Master)
            .with_schema::<BasicSchema>()
            .unwrap()
    };
    // Create the storage (which generates and uploads a vault key) and insert
    // one document.
    let document = {
        let bonsai = AsyncStorage::open(configuration(None)).await.unwrap();
        let db = bonsai
            .create_database::<BasicSchema>("test", false)
            .await
            .unwrap();
        Basic::new("test").push_into_async(&db).await.unwrap()
    };

    {
        // Should be able to access the storage again
        let bonsai = AsyncStorage::open(configuration(None)).await.unwrap();

        let db = bonsai.database::<BasicSchema>("test").await.unwrap();
        let retrieved = Basic::get_async(document.header.id, &db)
            .await
            .unwrap()
            .expect("document not found");
        assert_eq!(document, retrieved);
    }

    // Verify that we can't access the storage again without the vault
    assert!(
        AsyncStorage::open(configuration(Some(String::from("path-prefix"))))
            .await
            .is_err()
    );
}
301

            
302
1
// Same scenario as `basic_test` but through the blocking `Storage` API,
// exercising the `Tokio::Runtime` code path (no ambient runtime exists in a
// plain `#[test]`). Requires S3_BUCKET and S3_ENDPOINT; skips otherwise.
#[test]
fn blocking_test() {
    use bonsaidb_core::{
        connection::StorageConnection,
        document::KeyId,
        schema::SerializedCollection,
        test_util::{Basic, BasicSchema, TestDirectory},
    };
    use bonsaidb_local::{
        config::{Builder, StorageConfiguration},
        Storage,
    };
    use http::Uri;
    // Load a `.env` file when present; errors are ignored so plain
    // environment variables also work.
    drop(dotenv::dotenv());

    let bucket = env_var!("S3_BUCKET");
    let endpoint = env_var!("S3_ENDPOINT");

    let directory = TestDirectory::new("bonsaidb-keystorage-s3-blocking");

    // Builds a storage configuration whose vault key storage is the S3
    // bucket, optionally under a path prefix.
    let configuration = |prefix| {
        let mut vault_key_storage = S3VaultKeyStorage {
            bucket: bucket.clone(),
            endpoint: Some(Endpoint::immutable(Uri::try_from(&endpoint).unwrap())),
            ..S3VaultKeyStorage::default()
        };
        if let Some(prefix) = prefix {
            vault_key_storage = vault_key_storage.path(prefix);
        }

        StorageConfiguration::new(&directory)
            .vault_key_storage(vault_key_storage)
            .default_encryption_key(KeyId::Master)
            .with_schema::<BasicSchema>()
            .unwrap()
    };
    // Create the storage (which generates and uploads a vault key) and insert
    // one document.
    let document = {
        let bonsai = Storage::open(configuration(None)).unwrap();
        let db = bonsai
            .create_database::<BasicSchema>("test", false)
            .unwrap();
        Basic::new("test").push_into(&db).unwrap()
    };

    {
        // Should be able to access the storage again
        let bonsai = Storage::open(configuration(None)).unwrap();

        let db = bonsai.database::<BasicSchema>("test").unwrap();
        let retrieved = Basic::get(document.header.id, &db)
            .unwrap()
            .expect("document not found");
        assert_eq!(document, retrieved);
    }

    // Verify that we can't access the storage again without the vault
    assert!(Storage::open(configuration(Some(String::from("path-prefix")))).is_err());
}