use std::io::SeekFrom;
use std::mem::size_of;

use bonsaidb_files::{BonsaiFiles, FileConfig, FilesSchema};
use bonsaidb_local::config::{Builder, StorageConfiguration};
use bonsaidb_local::AsyncDatabase;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};

#[cfg_attr(not(test), tokio::main)]
#[cfg_attr(test, tokio::test)]
async fn main() -> anyhow::Result<()> {
    // Create a database for our files. If you would like to use these
    // collections in an existing database/schema, `BonsaiFiles` exposes a
    // `define_collections()` function which can be called from your
    // `Schema::define_collections()` implementation.
    //
    // Or, if you're using the Schema derive macro, you can add an
    // `include = [FilesSchema<BonsaiFiles>]` parameter to use BonsaiFiles
    // within your existing schema, as sketched below.
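    //
    // A commented sketch of that derive-based approach (the schema name and
    // `MyCollection` below are placeholders, not part of this example, and
    // the derive imports are omitted):
    //
    // #[derive(Schema)]
    // #[schema(
    //     name = "my-app",
    //     collections = [MyCollection],
    //     include = [FilesSchema<BonsaiFiles>]
    // )]
    // struct MyAppSchema;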
    let database = AsyncDatabase::open::<FilesSchema<BonsaiFiles>>(StorageConfiguration::new(
        "basic-files.bonsaidb",
    ))
    .await?;

    // This crate provides very basic path-based file storage. Regular BonsaiDb
    // documents can be up to 4GB in size, but must be loaded in their entirety
    // to be accessed. Files stored using `bonsaidb-files`, by contrast, are
    // broken into blocks and can be streamed and/or randomly accessed.
    //
    // The `BonsaiFiles` type implements `FileConfig` and defines a block size
    // of 64kb.
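    //
    // Assuming 64kb here means 64 KiB (65,536 bytes), the one-megabyte payload
    // built below works out to 1,048,576 / 65,536 = 16 blocks.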
    let mut one_megabyte = Vec::with_capacity(1024 * 1024);
    for i in 0..one_megabyte.capacity() / size_of::<u32>() {
        // Each u32 in the file will be the current offset in the file.
        let offset = u32::try_from(i * size_of::<u32>()).unwrap();
        one_megabyte.extend(offset.to_be_bytes());
    }
    let mut file = BonsaiFiles::build("async-file")
        .contents(&one_megabyte)
        .create_async(&database)
        .await?;

    // By default, files will be stored at the root level:
    assert_eq!(file.path(), "/async-file");
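    // A created file can later be re-opened by this same path. A rough,
    // commented-out sketch (the exact loader name and return type on
    // `FileConfig` are assumptions; check the crate docs):
    //
    // let reopened = BonsaiFiles::load_async("/async-file", &database)
    //     .await?
    //     .expect("file should exist");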

    // We can access this file's contents using `tokio::io::AsyncRead` and
    // `tokio::io::AsyncSeek`. Due to how the buffer contents reader works, it
    // is safe to call this type's `std::io::Seek` implementation in an async
    // context, as it is non-blocking.
    let mut contents = file.contents().await?;
    assert_eq!(contents.len(), u64::try_from(one_megabyte.len()).unwrap());
    // Each u32 in the file stores its own byte offset, so the u32 read at
    // offset 1024 should equal 1024.
    contents.seek(SeekFrom::Start(1024)).await?;
    let mut offset = [0; size_of::<u32>()];
    contents.read_exact(&mut offset).await?;
    let offset = u32::from_be_bytes(offset);
    assert_eq!(offset, 1024);
    drop(contents);

    // Files can be appended to, but existing contents cannot be modified.
    // `File::append()` can be used to write data that you have in memory.
    // Alternatively, a buffered writer can be used to write larger amounts of
    // data using `tokio::io::AsyncWrite`.
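    //
    // For data already in memory, the `File::append()` call mentioned above
    // would look roughly like this (commented sketch; the exact signature is
    // an assumption):
    //
    // file.append(&one_megabyte).await?;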
    let mut writer = file.append_buffered();
    let mut reader = &one_megabyte[..];
    let bytes_written = tokio::io::copy(&mut reader, &mut writer).await?;
    assert_eq!(bytes_written, u64::try_from(one_megabyte.len()).unwrap());
    writer.flush().await?;
    // The writer will attempt to flush on drop if there are any bytes remaining
    // in the buffer. Any errors will be ignored, however, so it is safer to
    // flush where you can handle the error.
    drop(writer);

    // Verify the file has the new contents.
    let contents = file.contents().await?;
    assert_eq!(
        contents.len(),
        u64::try_from(one_megabyte.len()).unwrap() * 2
    );

    // Clean up the file.
    file.delete().await?;

    Ok(())
}