pub mod backup_service_client;
pub(crate) mod error_notes;
pub mod read_record_bytes;
pub mod storage_ext;
pub(crate) mod stream;
#[cfg(test)]
pub mod test_utils;
use anyhow::{anyhow, Result};
use diem_config::config::RocksdbConfig;
use diem_crypto::HashValue;
use diem_infallible::duration_since_epoch;
use diem_jellyfish_merkle::{restore::JellyfishMerkleRestore, NodeBatch, TreeWriter};
use diem_types::{account_state_blob::AccountStateBlob, transaction::Version, waypoint::Waypoint};
use diemdb::{backup::restore_handler::RestoreHandler, DiemDB, GetRestoreHandler};
use std::{
collections::HashMap,
convert::TryFrom,
mem::size_of,
path::{Path, PathBuf},
sync::Arc,
};
use structopt::StructOpt;
use tokio::fs::metadata;
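
/// Options common to all backup commands; currently just the cap on chunk file
/// size (default 134217728 bytes = 128 MiB) used when splitting backup data
/// into chunk files.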
#[derive(Clone, StructOpt)]
pub struct GlobalBackupOpt {
#[structopt(
long = "max-chunk-size",
default_value = "134217728",
help = "Maximum chunk file size in bytes."
)]
pub max_chunk_size: usize,
}
#[derive(Clone, StructOpt)]
pub struct RocksdbOpt {
#[structopt(long, default_value = "1000")]
max_open_files: i32,
#[structopt(long, default_value = "1073741824")]
max_total_wal_size: u64,
}
impl From<RocksdbOpt> for RocksdbConfig {
fn from(opt: RocksdbOpt) -> Self {
Self {
max_open_files: opt.max_open_files,
max_total_wal_size: opt.max_total_wal_size,
}
}
}
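// Parsing only the binary name through structopt yields a `RocksdbOpt` with
// every field at its declared `default_value`.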
impl Default for RocksdbOpt {
fn default() -> Self {
Self::from_iter(vec!["exe"])
}
}
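
/// Options common to all restore commands: where to write (or `--dry-run` to
/// skip writing), how far to restore (`--target-version`), and which waypoints
/// to trust.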
#[derive(Clone, StructOpt)]
pub struct GlobalRestoreOpt {
#[structopt(long, help = "Dry run without writing data to DB.")]
pub dry_run: bool,
#[structopt(
long = "target-db-dir",
parse(from_os_str),
conflicts_with = "dry-run",
required_unless = "dry-run"
)]
pub db_dir: Option<PathBuf>,
#[structopt(
long,
help = "Content newer than this version will not be recovered to DB, \
defaulting to the largest version possible, meaning recover everything in the backups."
)]
pub target_version: Option<Version>,
#[structopt(flatten)]
pub trusted_waypoints: TrustedWaypointOpt,
#[structopt(flatten)]
pub rocksdb_opt: RocksdbOpt,
#[structopt(flatten)]
    pub concurrent_downloads: ConcurrentDownloadsOpt,
}
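
/// Whether to actually write restored content to a target DB (`Restore`) or
/// only to check it against the expected hashes and waypoints (`Verify`).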
pub enum RestoreRunMode {
Restore { restore_handler: RestoreHandler },
Verify,
}
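/// A `TreeWriter` that silently discards every node batch, letting `Verify`
/// mode drive the full Jellyfish Merkle restore pipeline without a database.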
struct MockTreeWriter;
impl TreeWriter<AccountStateBlob> for MockTreeWriter {
fn write_node_batch(&self, _node_batch: &NodeBatch<AccountStateBlob>) -> Result<()> {
Ok(())
}
}
impl RestoreRunMode {
pub fn name(&self) -> &'static str {
match self {
Self::Restore { restore_handler: _ } => "restore",
Self::Verify => "verify",
}
}
pub fn is_verify(&self) -> bool {
match self {
Self::Restore { restore_handler: _ } => false,
Self::Verify => true,
}
}
pub fn get_state_restore_receiver(
&self,
version: Version,
expected_root_hash: HashValue,
) -> Result<JellyfishMerkleRestore<AccountStateBlob>> {
match self {
Self::Restore { restore_handler } => {
restore_handler.get_state_restore_receiver(version, expected_root_hash)
}
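            // Verify mode runs the same Merkle restore logic but sinks all
            // writes into the no-op `MockTreeWriter`.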
Self::Verify => JellyfishMerkleRestore::new_overwrite(
Arc::new(MockTreeWriter),
version,
expected_root_hash,
),
}
}
}
#[derive(Clone)]
pub struct GlobalRestoreOptions {
pub target_version: Version,
pub trusted_waypoints: Arc<HashMap<Version, Waypoint>>,
pub run_mode: Arc<RestoreRunMode>,
pub concurrent_downloads: usize,
}
impl TryFrom<GlobalRestoreOpt> for GlobalRestoreOptions {
type Error = anyhow::Error;
fn try_from(opt: GlobalRestoreOpt) -> Result<Self> {
let target_version = opt.target_version.unwrap_or(Version::max_value());
        let concurrent_downloads = opt.concurrent_downloads.get();
let run_mode = if let Some(db_dir) = &opt.db_dir {
let restore_handler = Arc::new(DiemDB::open(
db_dir,
                false, /* read_only */
                None,  /* pruner */
                opt.rocksdb_opt.into(),
)?)
.get_restore_handler();
RestoreRunMode::Restore { restore_handler }
} else {
RestoreRunMode::Verify
};
Ok(Self {
target_version,
trusted_waypoints: Arc::new(opt.trusted_waypoints.verify()?),
run_mode: Arc::new(run_mode),
concurrent_downloads,
})
}
}
#[derive(Clone, Default, StructOpt)]
pub struct TrustedWaypointOpt {
#[structopt(
long,
help = "(multiple) When provided, an epoch ending LedgerInfo at the waypoint version will be \
checked against the hash in the waypoint, but signatures on it are NOT checked. \
Use this for two purposes: \
1. set the genesis or the latest waypoint to confirm the backup is compatible. \
2. set waypoints at versions where writeset transactions were used to overwrite the \
validator set, so that the signature check is skipped. \
                N.B. LedgerInfos are verified only when restoring / verifying the epoch-ending backups, \
i.e. they are NOT checked at all when doing one-shot restoring of the transaction \
and state backups."
)]
pub trust_waypoint: Vec<Waypoint>,
}
impl TrustedWaypointOpt {
pub fn verify(self) -> Result<HashMap<Version, Waypoint>> {
let mut trusted_waypoints = HashMap::new();
for w in self.trust_waypoint {
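            // `HashMap::insert` returns the previously stored value when the
            // key is already present; treat that as a duplicate-version error.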
trusted_waypoints
.insert(w.version(), w)
.map_or(Ok(()), |w| {
                    Err(anyhow!("Duplicate waypoint at version {}", w.version()))
})?;
}
Ok(trusted_waypoints)
}
}
#[derive(Clone, Copy, Default, StructOpt)]
pub struct ConcurrentDownloadsOpt {
#[structopt(
long,
help = "[Defaults to number of CPUs] \
number of concurrent downloads including metadata files from the backup storage."
)]
concurrent_downloads: Option<usize>,
}
impl ConcurrentDownloadsOpt {
pub fn get(&self) -> usize {
self.concurrent_downloads.unwrap_or_else(num_cpus::get)
}
}
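
/// Records are laid out as a `u32` length prefix followed by the record bytes;
/// cut the current chunk when it is non-empty and appending the next record
/// would push it past `max_chunk_size`.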
pub(crate) fn should_cut_chunk(chunk: &[u8], record: &[u8], max_chunk_size: usize) -> bool {
!chunk.is_empty() && chunk.len() + record.len() + size_of::<u32>() > max_chunk_size
}
pub(crate) async fn path_exists(path: &Path) -> bool {
metadata(&path).await.is_ok()
}
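
/// `Path` to `String` conversion that surfaces non-UTF-8 paths as errors
/// instead of converting lossily.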
pub(crate) trait PathToString {
fn path_to_string(&self) -> Result<String>;
}
impl<T: AsRef<Path>> PathToString for T {
fn path_to_string(&self) -> Result<String> {
self.as_ref()
.to_path_buf()
.into_os_string()
.into_string()
.map_err(|s| anyhow!("into_string failed for OsString '{:?}'", s))
}
}
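
/// Current time expressed as whole seconds since the Unix epoch.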
pub(crate) fn unix_timestamp_sec() -> i64 {
duration_since_epoch().as_secs() as i64
}
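
// A minimal sketch of how the chunk-cutting and concurrency helpers above
// behave; the module and test names are illustrative, not part of the
// original crate.
#[cfg(test)]
mod helper_examples {
    use super::*;

    #[test]
    fn chunk_is_cut_only_when_nonempty_and_over_budget() {
        // An empty chunk is never cut, even by an oversized record.
        assert!(!should_cut_chunk(&[], &[0u8; 100], 10));
        // 8 (chunk) + 4 (record) + 4 (u32 length prefix) = 16 > 15: cut.
        assert!(should_cut_chunk(&[0u8; 8], &[0u8; 4], 15));
        // 16 <= 16: keep appending to the same chunk.
        assert!(!should_cut_chunk(&[0u8; 8], &[0u8; 4], 16));
    }

    #[test]
    fn concurrent_downloads_fall_back_to_cpu_count() {
        let defaulted = ConcurrentDownloadsOpt {
            concurrent_downloads: None,
        };
        assert_eq!(defaulted.get(), num_cpus::get());

        let explicit = ConcurrentDownloadsOpt {
            concurrent_downloads: Some(4),
        };
        assert_eq!(explicit.get(), 4);
    }
}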