pub struct Node { /* private fields */ }

Expand description
A Zarr hierarchy node.
See https://zarr-specs.readthedocs.io/en/latest/v3/core/index.html#hierarchy.
Implementations§
Source§impl Node
impl Node
Source

pub fn open<TStorage: ?Sized + ReadableStorageTraits + ListableStorageTraits>(
storage: &Arc<TStorage>,
path: &str,
) -> Result<Self, NodeCreateError>
pub fn open<TStorage: ?Sized + ReadableStorageTraits + ListableStorageTraits>( storage: &Arc<TStorage>, path: &str, ) -> Result<Self, NodeCreateError>
Open a node at path and read metadata and children from storage with default MetadataRetrieveVersion.
§Errors
Returns NodeCreateError if metadata is invalid or there is a failure to list child nodes.
Examples found in repository?
10fn rectilinear_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
11 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
12 use zarrs::array::{ArraySubset, ZARR_NAN_F32, codec, data_type};
13 use zarrs::node::Node;
14 use zarrs::storage::store;
15
16 // Create a store
17 // let path = tempfile::TempDir::new()?;
18 // let mut store: ReadableWritableListableStorage =
19 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
20 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
21 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
22 && arg1 == "--usage-log"
23 {
24 let log_writer = Arc::new(std::sync::Mutex::new(
25 // std::io::BufWriter::new(
26 std::io::stdout(),
27 // )
28 ));
29 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
30 chrono::Utc::now().format("[%T%.3f] ").to_string()
31 }));
32 }
33
34 // Create the root group
35 zarrs::group::GroupBuilder::new()
36 .build(store.clone(), "/")?
37 .store_metadata()?;
38
39 // Create a group with attributes
40 let group_path = "/group";
41 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
42 group
43 .attributes_mut()
44 .insert("foo".into(), serde_json::Value::String("bar".into()));
45 group.store_metadata()?;
46
47 println!(
48 "The group metadata is:\n{}\n",
49 group.metadata().to_string_pretty()
50 );
51
52 // Create an array
53 let array_path = "/group/array";
54 let array = zarrs::array::ArrayBuilder::new(
55 vec![8, 8], // array shape
56 MetadataV3::new_with_configuration(
57 "rectilinear",
58 RectilinearChunkGridConfiguration::Inline {
59 chunk_shapes: vec![
60 // Varying: chunk sizes [1, 1, 1, 3, 2] (run-length encoded as [[1,3], 3, 2])
61 ChunkEdgeLengths::Varying(serde_json::from_str("[[1,3], 3, 2]").unwrap()),
62 // Scalar: regular 4-element chunks
63 ChunkEdgeLengths::Scalar(NonZeroU64::new(4).unwrap()),
64 ],
65 },
66 ),
67 data_type::float32(),
68 ZARR_NAN_F32,
69 )
70 .bytes_to_bytes_codecs(vec![
71 #[cfg(feature = "gzip")]
72 Arc::new(codec::GzipCodec::new(5)?),
73 ])
74 .dimension_names(["y", "x"].into())
75 // .storage_transformers(vec![].into())
76 .build(store.clone(), array_path)?;
77
78 // Write array metadata to store
79 array.store_metadata()?;
80
81 // Write some chunks (in parallel)
82 (0..4).into_par_iter().try_for_each(|i| {
83 let chunk_grid = array.chunk_grid();
84 let chunk_indices = vec![i, 0];
85 if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices)? {
86 let chunk_array = ndarray::ArrayD::<f32>::from_elem(
87 chunk_shape
88 .iter()
89 .map(|u| u.get() as usize)
90 .collect::<Vec<_>>(),
91 i as f32,
92 );
93 array.store_chunk(&chunk_indices, chunk_array)
94 } else {
95 Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
96 chunk_indices.to_vec(),
97 ))
98 }
99 })?;
100
101 println!(
102 "The array metadata is:\n{}\n",
103 array.metadata().to_string_pretty()
104 );
105
106 // Write a subset spanning multiple chunks, including updating chunks already written
107 array.store_array_subset(
108 &[3..6, 3..6], // start
109 ndarray::ArrayD::<f32>::from_shape_vec(
110 vec![3, 3],
111 vec![0.1f32, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
112 )?,
113 )?;
114
115 // Store elements directly, in this case set the 7th column to 123.0
116 array.store_array_subset(&[0..8, 6..7], &[123.0f32; 8])?;
117
118 // Store elements directly in a chunk, in this case set the last row of the bottom right chunk
119 array.store_chunk_subset(
120 // chunk indices
121 &[3, 1],
122 // subset within chunk
123 &[1..2, 0..4],
124 &[-4.0f32; 4],
125 )?;
126
127 // Read the whole array
128 let data_all: ndarray::ArrayD<f32> = array.retrieve_array_subset(&array.subset_all())?;
129 println!("The whole array is:\n{data_all}\n");
130
131 // Read a chunk back from the store
132 let chunk_indices = vec![1, 0];
133 let data_chunk: ndarray::ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
134 println!("Chunk [1,0] is:\n{data_chunk}\n");
135
136 // Read the central 4x2 subset of the array
137 let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
138 let data_4x2: ndarray::ArrayD<f32> = array.retrieve_array_subset(&subset_4x2)?;
139 println!("The middle 4x2 subset is:\n{data_4x2}\n");
140
141 // Show the hierarchy
142 let node = Node::open(&store, "/").unwrap();
143 let tree = node.hierarchy_tree();
144 println!("The Zarr hierarchy tree is:\n{tree}");
145
146 Ok(())
147}

More examples
8fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
12 use zarrs::node::Node;
13 use zarrs::storage::store;
14
15 // Create a store
16 // let path = tempfile::TempDir::new()?;
17 // let mut store: ReadableWritableListableStorage =
18 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
19 // let mut store: ReadableWritableListableStorage = Arc::new(
20 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/array_write_read.zarr")?,
21 // );
22 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
23 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
24 && arg1 == "--usage-log"
25 {
26 let log_writer = Arc::new(std::sync::Mutex::new(
27 // std::io::BufWriter::new(
28 std::io::stdout(),
29 // )
30 ));
31 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
32 chrono::Utc::now().format("[%T%.3f] ").to_string()
33 }));
34 }
35
36 // Create the root group
37 zarrs::group::GroupBuilder::new()
38 .build(store.clone(), "/")?
39 .store_metadata()?;
40
41 // Create a group with attributes
42 let group_path = "/group";
43 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
44 group
45 .attributes_mut()
46 .insert("foo".into(), serde_json::Value::String("bar".into()));
47 group.store_metadata()?;
48
49 println!(
50 "The group metadata is:\n{}\n",
51 group.metadata().to_string_pretty()
52 );
53
54 // Create an array
55 let array_path = "/group/array";
56 let array = zarrs::array::ArrayBuilder::new(
57 vec![8, 8], // array shape
58 vec![4, 4], // regular chunk shape
59 data_type::float32(),
60 ZARR_NAN_F32,
61 )
62 // .bytes_to_bytes_codecs(vec![]) // uncompressed
63 .dimension_names(["y", "x"].into())
64 // .storage_transformers(vec![].into())
65 .build(store.clone(), array_path)?;
66
67 // Write array metadata to store
68 array.store_metadata()?;
69
70 println!(
71 "The array metadata is:\n{}\n",
72 array.metadata().to_string_pretty()
73 );
74
75 // Write some chunks
76 (0..2).into_par_iter().try_for_each(|i| {
77 let chunk_indices: Vec<u64> = vec![0, i];
78 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
79 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
80 })?;
81 array.store_chunk(
82 &chunk_indices,
83 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
84 )
85 })?;
86
87 let subset_all = array.subset_all();
88 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
89 println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
90
91 // Store multiple chunks
92 array.store_chunks(
93 &[1..2, 0..2],
94 &[
95 //
96 1.0f32, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
97 //
98 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
99 ],
100 )?;
101 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
102 println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
103
104 // Write a subset spanning multiple chunks, including updating chunks already written
105 array.store_array_subset(
106 &[3..6, 3..6],
107 &[-3.3f32, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
108 )?;
109 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
110 println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
111
112 // Store array subset
113 array.store_array_subset(
114 &[0..8, 6..7],
115 &[-0.6f32, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
116 )?;
117 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
118 println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
119
120 // Store chunk subset
121 array.store_chunk_subset(
122 // chunk indices
123 &[1, 1],
124 // subset within chunk
125 &[3..4, 0..4],
126 &[-7.4f32, -7.5, -7.6, -7.7],
127 )?;
128 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
129 println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
130
131 // Erase a chunk
132 array.erase_chunk(&[0, 0])?;
133 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
134 println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");
135
136 // Read a chunk
137 let chunk_indices = vec![0, 1];
138 let data_chunk: ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
139 println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
140
141 // Read chunks
142 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
143 let data_chunks: ArrayD<f32> = array.retrieve_chunks(&chunks)?;
144 println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
145
146 // Retrieve an array subset
147 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
148 let data_subset: ArrayD<f32> = array.retrieve_array_subset(&subset)?;
149 println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
150
151 // Show the hierarchy
152 let node = Node::open(&store, "/").unwrap();
153 let tree = node.hierarchy_tree();
154 println!("hierarchy_tree:\n{}", tree);
155
156 Ok(())
157}

10fn sharded_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
11 use std::sync::Arc;
12
13 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
14 use zarrs::array::{ArraySubset, codec, data_type};
15 use zarrs::node::Node;
16 use zarrs::storage::store;
17
18 // Create a store
19 // let path = tempfile::TempDir::new()?;
20 // let mut store: ReadableWritableListableStorage =
21 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
22 // let mut store: ReadableWritableListableStorage = Arc::new(
23 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/sharded_array_write_read.zarr")?,
24 // );
25 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
26 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
27 && arg1 == "--usage-log"
28 {
29 let log_writer = Arc::new(std::sync::Mutex::new(
30 // std::io::BufWriter::new(
31 std::io::stdout(),
32 // )
33 ));
34 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
35 chrono::Utc::now().format("[%T%.3f] ").to_string()
36 }));
37 }
38
39 // Create the root group
40 zarrs::group::GroupBuilder::new()
41 .build(store.clone(), "/")?
42 .store_metadata()?;
43
44 // Create a group with attributes
45 let group_path = "/group";
46 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
47 group
48 .attributes_mut()
49 .insert("foo".into(), serde_json::Value::String("bar".into()));
50 group.store_metadata()?;
51
52 // Create an array
53 let array_path = "/group/array";
54 let subchunk_shape = vec![4, 4];
55 let array = zarrs::array::ArrayBuilder::new(
56 vec![8, 8], // array shape
57 vec![4, 8], // chunk (shard) shape
58 data_type::uint16(),
59 0u16,
60 )
61 .subchunk_shape(subchunk_shape.clone())
62 .bytes_to_bytes_codecs(vec![
63 #[cfg(feature = "gzip")]
64 Arc::new(codec::GzipCodec::new(5)?),
65 ])
66 .dimension_names(["y", "x"].into())
67 // .storage_transformers(vec![].into())
68 .build(store.clone(), array_path)?;
69
70 // Write array metadata to store
71 array.store_metadata()?;
72
73 // The array metadata is
74 println!(
75 "The array metadata is:\n{}\n",
76 array.metadata().to_string_pretty()
77 );
78
79 // Use default codec options (concurrency etc)
80 let options = CodecOptions::default();
81
82 // Write some shards (in parallel)
83 (0..2).into_par_iter().try_for_each(|s| {
84 let chunk_grid = array.chunk_grid();
85 let chunk_indices = vec![s, 0];
86 if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices)? {
87 let chunk_array = ndarray::ArrayD::<u16>::from_shape_fn(
88 chunk_shape
89 .iter()
90 .map(|u| u.get() as usize)
91 .collect::<Vec<_>>(),
92 |ij| {
93 (s * chunk_shape[0].get() * chunk_shape[1].get()
94 + ij[0] as u64 * chunk_shape[1].get()
95 + ij[1] as u64) as u16
96 },
97 );
98 array.store_chunk(&chunk_indices, chunk_array)
99 } else {
100 Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
101 chunk_indices.to_vec(),
102 ))
103 }
104 })?;
105
106 // Read the whole array
107 let data_all: ArrayD<u16> = array.retrieve_array_subset(&array.subset_all())?;
108 println!("The whole array is:\n{data_all}\n");
109
110 // Read a shard back from the store
111 let shard_indices = vec![1, 0];
112 let data_shard: ArrayD<u16> = array.retrieve_chunk(&shard_indices)?;
113 println!("Shard [1,0] is:\n{data_shard}\n");
114
115 // Read a subchunk from the store
116 let subset_chunk_1_0 = ArraySubset::new_with_ranges(&[4..8, 0..4]);
117 let data_chunk: ArrayD<u16> = array.retrieve_array_subset(&subset_chunk_1_0)?;
118 println!("Chunk [1,0] is:\n{data_chunk}\n");
119
120 // Read the central 4x2 subset of the array
121 let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
122 let data_4x2: ArrayD<u16> = array.retrieve_array_subset(&subset_4x2)?;
123 println!("The middle 4x2 subset is:\n{data_4x2}\n");
124
125 // Decode subchunks
126 // In some cases, it might be preferable to decode subchunks in a shard directly.
127 // If using the partial decoder, then the shard index will only be read once from the store.
128 let partial_decoder = array.partial_decoder(&[0, 0])?;
129 println!("Decoded subchunks:");
130 for subchunk_subset in [
131 ArraySubset::new_with_start_shape(vec![0, 0], subchunk_shape.clone())?,
132 ArraySubset::new_with_start_shape(vec![0, 4], subchunk_shape.clone())?,
133 ] {
134 println!("{subchunk_subset}");
135 let decoded_subchunk_bytes = partial_decoder.partial_decode(&subchunk_subset, &options)?;
136 let ndarray = bytes_to_ndarray::<u16>(
137 &subchunk_shape,
138 decoded_subchunk_bytes.into_fixed()?.into_owned(),
139 )?;
140 println!("{ndarray}\n");
141 }
142
143 // Show the hierarchy
144 let node = Node::open(&store, "/").unwrap();
145 let tree = node.hierarchy_tree();
146 println!("The Zarr hierarchy tree is:\n{}", tree);
147
148 println!(
149 "The keys in the store are:\n[{}]",
150 store.list().unwrap_or_default().iter().format(", ")
151 );
152
153 Ok(())
154}

8fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
12 use zarrs::node::Node;
13 use zarrs::storage::store;
14
15 // Create a store
16 // let path = tempfile::TempDir::new()?;
17 // let mut store: ReadableWritableListableStorage =
18 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
19 // let mut store: ReadableWritableListableStorage = Arc::new(
20 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/array_write_read.zarr")?,
21 // );
22 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
23 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
24 && arg1 == "--usage-log"
25 {
26 let log_writer = Arc::new(std::sync::Mutex::new(
27 // std::io::BufWriter::new(
28 std::io::stdout(),
29 // )
30 ));
31 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
32 chrono::Utc::now().format("[%T%.3f] ").to_string()
33 }));
34 }
35
36 // Create the root group
37 zarrs::group::GroupBuilder::new()
38 .build(store.clone(), "/")?
39 .store_metadata()?;
40
41 // Create a group with attributes
42 let group_path = "/group";
43 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
44 group
45 .attributes_mut()
46 .insert("foo".into(), serde_json::Value::String("bar".into()));
47 group.store_metadata()?;
48
49 println!(
50 "The group metadata is:\n{}\n",
51 group.metadata().to_string_pretty()
52 );
53
54 // Create an array
55 let array_path = "/group/array";
56 let array = zarrs::array::ArrayBuilder::new(
57 vec![8, 8], // array shape
58 vec![4, 4], // regular chunk shape
59 data_type::float32(),
60 ZARR_NAN_F32,
61 )
62 // .bytes_to_bytes_codecs(vec![]) // uncompressed
63 .dimension_names(["y", "x"].into())
64 // .storage_transformers(vec![].into())
65 .build(store.clone(), array_path)?;
66
67 // Write array metadata to store
68 array.store_metadata()?;
69
70 println!(
71 "The array metadata is:\n{}\n",
72 array.metadata().to_string_pretty()
73 );
74
75 // Write some chunks
76 (0..2).into_par_iter().try_for_each(|i| {
77 let chunk_indices: Vec<u64> = vec![0, i];
78 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
79 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
80 })?;
81 array.store_chunk(
82 &chunk_indices,
83 ArrayD::<f32>::from_shape_vec(
84 chunk_subset.shape_usize(),
85 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
86 )
87 .unwrap(),
88 )
89 })?;
90
91 let subset_all = array.subset_all();
92 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
93 println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
94
95 // Store multiple chunks
96 let ndarray_chunks: Array2<f32> = array![
97 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
98 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
99 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
100 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
101 ];
102 array.store_chunks(&[1..2, 0..2], ndarray_chunks)?;
103 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
104 println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
105
106 // Write a subset spanning multiple chunks, including updating chunks already written
107 let ndarray_subset: Array2<f32> =
108 array![[-3.3, -3.4, -3.5,], [-4.3, -4.4, -4.5,], [-5.3, -5.4, -5.5],];
109 array.store_array_subset(&[3..6, 3..6], ndarray_subset)?;
110 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
111 println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
112
113 // Store array subset
114 let ndarray_subset: Array2<f32> = array![
115 [-0.6],
116 [-1.6],
117 [-2.6],
118 [-3.6],
119 [-4.6],
120 [-5.6],
121 [-6.6],
122 [-7.6],
123 ];
124 array.store_array_subset(&[0..8, 6..7], ndarray_subset)?;
125 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
126 println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
127
128 // Store chunk subset
129 let ndarray_chunk_subset: Array2<f32> = array![[-7.4, -7.5, -7.6, -7.7],];
130 array.store_chunk_subset(
131 // chunk indices
132 &[1, 1],
133 // subset within chunk
134 &[3..4, 0..4],
135 ndarray_chunk_subset,
136 )?;
137 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
138 println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
139
140 // Erase a chunk
141 array.erase_chunk(&[0, 0])?;
142 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
143 println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");
144
145 // Read a chunk
146 let chunk_indices = vec![0, 1];
147 let data_chunk: ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
148 println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
149
150 // Read chunks
151 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
152 let data_chunks: ArrayD<f32> = array.retrieve_chunks(&chunks)?;
153 println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
154
155 // Retrieve an array subset
156 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
157 let data_subset: ArrayD<f32> = array.retrieve_array_subset(&subset)?;
158 println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
159
160 // Show the hierarchy
161 let node = Node::open(&store, "/").unwrap();
162 let tree = node.hierarchy_tree();
163 println!("hierarchy_tree:\n{}", tree);
164
165 Ok(())
166}

Source

pub fn open_opt<TStorage: ?Sized + ReadableStorageTraits + ListableStorageTraits>(
storage: &Arc<TStorage>,
path: &str,
version: &MetadataRetrieveVersion,
) -> Result<Self, NodeCreateError>
pub fn open_opt<TStorage: ?Sized + ReadableStorageTraits + ListableStorageTraits>( storage: &Arc<TStorage>, path: &str, version: &MetadataRetrieveVersion, ) -> Result<Self, NodeCreateError>
Open a node at path and read metadata and children from storage with non-default MetadataRetrieveVersion.
§Errors
Returns NodeCreateError if metadata is invalid or there is a failure to list child nodes.
Source

pub async fn async_open<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncListableStorageTraits>(
storage: Arc<TStorage>,
path: &str,
) -> Result<Self, NodeCreateError>
Available on crate feature async only.
pub async fn async_open<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncListableStorageTraits>( storage: Arc<TStorage>, path: &str, ) -> Result<Self, NodeCreateError>
Available on crate feature async only.

Asynchronously open a node at path and read metadata and children from storage with default MetadataRetrieveVersion.
§Errors
Returns NodeCreateError if metadata is invalid or there is a failure to list child nodes.
Examples found in repository?
8async fn async_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use futures::StreamExt;
12 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
13 use zarrs::node::Node;
14
15 // Create a store
16 let mut store: AsyncReadableWritableListableStorage = Arc::new(
17 zarrs_object_store::AsyncObjectStore::new(object_store::memory::InMemory::new()),
18 );
19 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
20 && arg1 == "--usage-log"
21 {
22 let log_writer = Arc::new(std::sync::Mutex::new(
23 // std::io::BufWriter::new(
24 std::io::stdout(),
25 // )
26 ));
27 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
28 chrono::Utc::now().format("[%T%.3f] ").to_string()
29 }));
30 }
31
32 // Create the root group
33 zarrs::group::GroupBuilder::new()
34 .build(store.clone(), "/")?
35 .async_store_metadata()
36 .await?;
37
38 // Create a group with attributes
39 let group_path = "/group";
40 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
41 group
42 .attributes_mut()
43 .insert("foo".into(), serde_json::Value::String("bar".into()));
44 group.async_store_metadata().await?;
45
46 println!(
47 "The group metadata is:\n{}\n",
48 group.metadata().to_string_pretty()
49 );
50
51 // Create an array
52 let array_path = "/group/array";
53 let array = zarrs::array::ArrayBuilder::new(
54 vec![8, 8], // array shape
55 vec![4, 4], // regular chunk shape
56 data_type::float32(),
57 ZARR_NAN_F32,
58 )
59 // .bytes_to_bytes_codecs(vec![]) // uncompressed
60 .dimension_names(["y", "x"].into())
61 // .storage_transformers(vec![].into())
62 .build_arc(store.clone(), array_path)?;
63
64 // Write array metadata to store
65 array.async_store_metadata().await?;
66
67 println!(
68 "The array metadata is:\n{}\n",
69 array.metadata().to_string_pretty()
70 );
71
72 // Write some chunks
73 let store_chunk = |i: u64| {
74 let array = array.clone();
75 async move {
76 let chunk_indices: Vec<u64> = vec![0, i];
77 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
78 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
79 })?;
80 array
81 .async_store_chunk(
82 &chunk_indices,
83 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
84 )
85 .await
86 }
87 };
88 futures::stream::iter(0..2)
89 .map(Ok)
90 .try_for_each_concurrent(None, store_chunk)
91 .await?;
92
93 let subset_all = array.subset_all();
94 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
95 println!("async_store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
96
97 // Store multiple chunks
98 array
99 .async_store_chunks(
100 &[1..2, 0..2],
101 &[
102 //
103 1.0f32, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
104 //
105 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
106 ],
107 )
108 .await?;
109 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
110 println!("async_store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
111
112 // Write a subset spanning multiple chunks, including updating chunks already written
113 array
114 .async_store_array_subset(
115 &[3..6, 3..6],
116 &[-3.3, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
117 )
118 .await?;
119 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
120 println!("async_store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
121
122 // Store array subset
123 array
124 .async_store_array_subset(
125 &[0..8, 6..7],
126 &[-0.6f32, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
127 )
128 .await?;
129 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
130 println!("async_store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
131
132 // Store chunk subset
133 array
134 .async_store_chunk_subset(
135 // chunk indices
136 &[1, 1],
137 // subset within chunk
138 &[3..4, 0..4],
139 &[-7.4f32, -7.5, -7.6, -7.7],
140 )
141 .await?;
142 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
143 println!("async_store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
144
145 // Erase a chunk
146 array.async_erase_chunk(&[0, 0]).await?;
147 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
148 println!("async_erase_chunk [0, 0]:\n{data_all:+4.1}\n");
149
150 // Read a chunk
151 let chunk_indices = vec![0, 1];
152 let data_chunk: ArrayD<f32> = array.async_retrieve_chunk(&chunk_indices).await?;
153 println!("async_retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
154
155 // Read chunks
156 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
157 let data_chunks: ArrayD<f32> = array.async_retrieve_chunks(&chunks).await?;
158 println!("async_retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
159
160 // Retrieve an array subset
161 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
162 let data_subset: ArrayD<f32> = array.async_retrieve_array_subset(&subset).await?;
163 println!("async_retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
164
165 // Show the hierarchy
166 let node = Node::async_open(store, "/").await.unwrap();
167 let tree = node.hierarchy_tree();
168 println!("hierarchy_tree:\n{}", tree);
169
170 Ok(())
171}

Source

pub async fn async_open_opt<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncListableStorageTraits>(
storage: Arc<TStorage>,
path: &str,
version: &MetadataRetrieveVersion,
) -> Result<Self, NodeCreateError>
Available on crate feature async only.
pub async fn async_open_opt<TStorage: ?Sized + AsyncReadableStorageTraits + AsyncListableStorageTraits>( storage: Arc<TStorage>, path: &str, version: &MetadataRetrieveVersion, ) -> Result<Self, NodeCreateError>
Available on crate feature async only. Asynchronously open a node at path and read metadata and children from storage with a non-default MetadataRetrieveVersion.
§Errors
Returns NodeCreateError if metadata is invalid or there is a failure to list child nodes.
Sourcepub fn new_with_metadata(
path: NodePath,
metadata: NodeMetadata,
children: Vec<Self>,
) -> Self
pub fn new_with_metadata( path: NodePath, metadata: NodeMetadata, children: Vec<Self>, ) -> Self
Create a new node at path with metadata and children.
Sourcepub fn metadata(&self) -> &NodeMetadata
pub fn metadata(&self) -> &NodeMetadata
Returns a reference to the metadata of the node.
Sourcepub fn hierarchy_tree(&self) -> String
pub fn hierarchy_tree(&self) -> String
Return a tree representation of a hierarchy as a string.
Arrays are annotated with their shape and data type. For example:
a
baz [10000, 1000] float64
foo [10000, 1000] float64
b
Examples found in repository?
10fn rectilinear_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
11 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
12 use zarrs::array::{ArraySubset, ZARR_NAN_F32, codec, data_type};
13 use zarrs::node::Node;
14 use zarrs::storage::store;
15
16 // Create a store
17 // let path = tempfile::TempDir::new()?;
18 // let mut store: ReadableWritableListableStorage =
19 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
20 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
21 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
22 && arg1 == "--usage-log"
23 {
24 let log_writer = Arc::new(std::sync::Mutex::new(
25 // std::io::BufWriter::new(
26 std::io::stdout(),
27 // )
28 ));
29 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
30 chrono::Utc::now().format("[%T%.3f] ").to_string()
31 }));
32 }
33
34 // Create the root group
35 zarrs::group::GroupBuilder::new()
36 .build(store.clone(), "/")?
37 .store_metadata()?;
38
39 // Create a group with attributes
40 let group_path = "/group";
41 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
42 group
43 .attributes_mut()
44 .insert("foo".into(), serde_json::Value::String("bar".into()));
45 group.store_metadata()?;
46
47 println!(
48 "The group metadata is:\n{}\n",
49 group.metadata().to_string_pretty()
50 );
51
52 // Create an array
53 let array_path = "/group/array";
54 let array = zarrs::array::ArrayBuilder::new(
55 vec![8, 8], // array shape
56 MetadataV3::new_with_configuration(
57 "rectilinear",
58 RectilinearChunkGridConfiguration::Inline {
59 chunk_shapes: vec![
60 // Varying: chunk sizes [1, 1, 1, 3, 2] (run-length encoded as [[1,3], 3, 2])
61 ChunkEdgeLengths::Varying(serde_json::from_str("[[1,3], 3, 2]").unwrap()),
62 // Scalar: regular 4-element chunks
63 ChunkEdgeLengths::Scalar(NonZeroU64::new(4).unwrap()),
64 ],
65 },
66 ),
67 data_type::float32(),
68 ZARR_NAN_F32,
69 )
70 .bytes_to_bytes_codecs(vec![
71 #[cfg(feature = "gzip")]
72 Arc::new(codec::GzipCodec::new(5)?),
73 ])
74 .dimension_names(["y", "x"].into())
75 // .storage_transformers(vec![].into())
76 .build(store.clone(), array_path)?;
77
78 // Write array metadata to store
79 array.store_metadata()?;
80
81 // Write some chunks (in parallel)
82 (0..4).into_par_iter().try_for_each(|i| {
83 let chunk_grid = array.chunk_grid();
84 let chunk_indices = vec![i, 0];
85 if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices)? {
86 let chunk_array = ndarray::ArrayD::<f32>::from_elem(
87 chunk_shape
88 .iter()
89 .map(|u| u.get() as usize)
90 .collect::<Vec<_>>(),
91 i as f32,
92 );
93 array.store_chunk(&chunk_indices, chunk_array)
94 } else {
95 Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
96 chunk_indices.to_vec(),
97 ))
98 }
99 })?;
100
101 println!(
102 "The array metadata is:\n{}\n",
103 array.metadata().to_string_pretty()
104 );
105
106 // Write a subset spanning multiple chunks, including updating chunks already written
107 array.store_array_subset(
108 &[3..6, 3..6], // start
109 ndarray::ArrayD::<f32>::from_shape_vec(
110 vec![3, 3],
111 vec![0.1f32, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
112 )?,
113 )?;
114
115 // Store elements directly, in this case set the 7th column to 123.0
116 array.store_array_subset(&[0..8, 6..7], &[123.0f32; 8])?;
117
118 // Store elements directly in a chunk, in this case set the last row of the bottom right chunk
119 array.store_chunk_subset(
120 // chunk indices
121 &[3, 1],
122 // subset within chunk
123 &[1..2, 0..4],
124 &[-4.0f32; 4],
125 )?;
126
127 // Read the whole array
128 let data_all: ndarray::ArrayD<f32> = array.retrieve_array_subset(&array.subset_all())?;
129 println!("The whole array is:\n{data_all}\n");
130
131 // Read a chunk back from the store
132 let chunk_indices = vec![1, 0];
133 let data_chunk: ndarray::ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
134 println!("Chunk [1,0] is:\n{data_chunk}\n");
135
136 // Read the central 4x2 subset of the array
137 let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
138 let data_4x2: ndarray::ArrayD<f32> = array.retrieve_array_subset(&subset_4x2)?;
139 println!("The middle 4x2 subset is:\n{data_4x2}\n");
140
141 // Show the hierarchy
142 let node = Node::open(&store, "/").unwrap();
143 let tree = node.hierarchy_tree();
144 println!("The Zarr hierarchy tree is:\n{tree}");
145
146 Ok(())
147 }
More examples
8fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
12 use zarrs::node::Node;
13 use zarrs::storage::store;
14
15 // Create a store
16 // let path = tempfile::TempDir::new()?;
17 // let mut store: ReadableWritableListableStorage =
18 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
19 // let mut store: ReadableWritableListableStorage = Arc::new(
20 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/array_write_read.zarr")?,
21 // );
22 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
23 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
24 && arg1 == "--usage-log"
25 {
26 let log_writer = Arc::new(std::sync::Mutex::new(
27 // std::io::BufWriter::new(
28 std::io::stdout(),
29 // )
30 ));
31 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
32 chrono::Utc::now().format("[%T%.3f] ").to_string()
33 }));
34 }
35
36 // Create the root group
37 zarrs::group::GroupBuilder::new()
38 .build(store.clone(), "/")?
39 .store_metadata()?;
40
41 // Create a group with attributes
42 let group_path = "/group";
43 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
44 group
45 .attributes_mut()
46 .insert("foo".into(), serde_json::Value::String("bar".into()));
47 group.store_metadata()?;
48
49 println!(
50 "The group metadata is:\n{}\n",
51 group.metadata().to_string_pretty()
52 );
53
54 // Create an array
55 let array_path = "/group/array";
56 let array = zarrs::array::ArrayBuilder::new(
57 vec![8, 8], // array shape
58 vec![4, 4], // regular chunk shape
59 data_type::float32(),
60 ZARR_NAN_F32,
61 )
62 // .bytes_to_bytes_codecs(vec![]) // uncompressed
63 .dimension_names(["y", "x"].into())
64 // .storage_transformers(vec![].into())
65 .build(store.clone(), array_path)?;
66
67 // Write array metadata to store
68 array.store_metadata()?;
69
70 println!(
71 "The array metadata is:\n{}\n",
72 array.metadata().to_string_pretty()
73 );
74
75 // Write some chunks
76 (0..2).into_par_iter().try_for_each(|i| {
77 let chunk_indices: Vec<u64> = vec![0, i];
78 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
79 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
80 })?;
81 array.store_chunk(
82 &chunk_indices,
83 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
84 )
85 })?;
86
87 let subset_all = array.subset_all();
88 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
89 println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
90
91 // Store multiple chunks
92 array.store_chunks(
93 &[1..2, 0..2],
94 &[
95 //
96 1.0f32, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
97 //
98 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
99 ],
100 )?;
101 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
102 println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
103
104 // Write a subset spanning multiple chunks, including updating chunks already written
105 array.store_array_subset(
106 &[3..6, 3..6],
107 &[-3.3f32, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
108 )?;
109 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
110 println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
111
112 // Store array subset
113 array.store_array_subset(
114 &[0..8, 6..7],
115 &[-0.6f32, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
116 )?;
117 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
118 println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
119
120 // Store chunk subset
121 array.store_chunk_subset(
122 // chunk indices
123 &[1, 1],
124 // subset within chunk
125 &[3..4, 0..4],
126 &[-7.4f32, -7.5, -7.6, -7.7],
127 )?;
128 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
129 println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
130
131 // Erase a chunk
132 array.erase_chunk(&[0, 0])?;
133 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
134 println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");
135
136 // Read a chunk
137 let chunk_indices = vec![0, 1];
138 let data_chunk: ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
139 println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
140
141 // Read chunks
142 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
143 let data_chunks: ArrayD<f32> = array.retrieve_chunks(&chunks)?;
144 println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
145
146 // Retrieve an array subset
147 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
148 let data_subset: ArrayD<f32> = array.retrieve_array_subset(&subset)?;
149 println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
150
151 // Show the hierarchy
152 let node = Node::open(&store, "/").unwrap();
153 let tree = node.hierarchy_tree();
154 println!("hierarchy_tree:\n{}", tree);
155
156 Ok(())
157 }

10 fn sharded_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
11 use std::sync::Arc;
12
13 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
14 use zarrs::array::{ArraySubset, codec, data_type};
15 use zarrs::node::Node;
16 use zarrs::storage::store;
17
18 // Create a store
19 // let path = tempfile::TempDir::new()?;
20 // let mut store: ReadableWritableListableStorage =
21 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
22 // let mut store: ReadableWritableListableStorage = Arc::new(
23 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/sharded_array_write_read.zarr")?,
24 // );
25 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
26 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
27 && arg1 == "--usage-log"
28 {
29 let log_writer = Arc::new(std::sync::Mutex::new(
30 // std::io::BufWriter::new(
31 std::io::stdout(),
32 // )
33 ));
34 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
35 chrono::Utc::now().format("[%T%.3f] ").to_string()
36 }));
37 }
38
39 // Create the root group
40 zarrs::group::GroupBuilder::new()
41 .build(store.clone(), "/")?
42 .store_metadata()?;
43
44 // Create a group with attributes
45 let group_path = "/group";
46 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
47 group
48 .attributes_mut()
49 .insert("foo".into(), serde_json::Value::String("bar".into()));
50 group.store_metadata()?;
51
52 // Create an array
53 let array_path = "/group/array";
54 let subchunk_shape = vec![4, 4];
55 let array = zarrs::array::ArrayBuilder::new(
56 vec![8, 8], // array shape
57 vec![4, 8], // chunk (shard) shape
58 data_type::uint16(),
59 0u16,
60 )
61 .subchunk_shape(subchunk_shape.clone())
62 .bytes_to_bytes_codecs(vec![
63 #[cfg(feature = "gzip")]
64 Arc::new(codec::GzipCodec::new(5)?),
65 ])
66 .dimension_names(["y", "x"].into())
67 // .storage_transformers(vec![].into())
68 .build(store.clone(), array_path)?;
69
70 // Write array metadata to store
71 array.store_metadata()?;
72
73 // The array metadata is
74 println!(
75 "The array metadata is:\n{}\n",
76 array.metadata().to_string_pretty()
77 );
78
79 // Use default codec options (concurrency etc)
80 let options = CodecOptions::default();
81
82 // Write some shards (in parallel)
83 (0..2).into_par_iter().try_for_each(|s| {
84 let chunk_grid = array.chunk_grid();
85 let chunk_indices = vec![s, 0];
86 if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices)? {
87 let chunk_array = ndarray::ArrayD::<u16>::from_shape_fn(
88 chunk_shape
89 .iter()
90 .map(|u| u.get() as usize)
91 .collect::<Vec<_>>(),
92 |ij| {
93 (s * chunk_shape[0].get() * chunk_shape[1].get()
94 + ij[0] as u64 * chunk_shape[1].get()
95 + ij[1] as u64) as u16
96 },
97 );
98 array.store_chunk(&chunk_indices, chunk_array)
99 } else {
100 Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
101 chunk_indices.to_vec(),
102 ))
103 }
104 })?;
105
106 // Read the whole array
107 let data_all: ArrayD<u16> = array.retrieve_array_subset(&array.subset_all())?;
108 println!("The whole array is:\n{data_all}\n");
109
110 // Read a shard back from the store
111 let shard_indices = vec![1, 0];
112 let data_shard: ArrayD<u16> = array.retrieve_chunk(&shard_indices)?;
113 println!("Shard [1,0] is:\n{data_shard}\n");
114
115 // Read a subchunk from the store
116 let subset_chunk_1_0 = ArraySubset::new_with_ranges(&[4..8, 0..4]);
117 let data_chunk: ArrayD<u16> = array.retrieve_array_subset(&subset_chunk_1_0)?;
118 println!("Chunk [1,0] is:\n{data_chunk}\n");
119
120 // Read the central 4x2 subset of the array
121 let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
122 let data_4x2: ArrayD<u16> = array.retrieve_array_subset(&subset_4x2)?;
123 println!("The middle 4x2 subset is:\n{data_4x2}\n");
124
125 // Decode subchunks
126 // In some cases, it might be preferable to decode subchunks in a shard directly.
127 // If using the partial decoder, then the shard index will only be read once from the store.
128 let partial_decoder = array.partial_decoder(&[0, 0])?;
129 println!("Decoded subchunks:");
130 for subchunk_subset in [
131 ArraySubset::new_with_start_shape(vec![0, 0], subchunk_shape.clone())?,
132 ArraySubset::new_with_start_shape(vec![0, 4], subchunk_shape.clone())?,
133 ] {
134 println!("{subchunk_subset}");
135 let decoded_subchunk_bytes = partial_decoder.partial_decode(&subchunk_subset, &options)?;
136 let ndarray = bytes_to_ndarray::<u16>(
137 &subchunk_shape,
138 decoded_subchunk_bytes.into_fixed()?.into_owned(),
139 )?;
140 println!("{ndarray}\n");
141 }
142
143 // Show the hierarchy
144 let node = Node::open(&store, "/").unwrap();
145 let tree = node.hierarchy_tree();
146 println!("The Zarr hierarchy tree is:\n{}", tree);
147
148 println!(
149 "The keys in the store are:\n[{}]",
150 store.list().unwrap_or_default().iter().format(", ")
151 );
152
153 Ok(())
154 }

8 fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
12 use zarrs::node::Node;
13 use zarrs::storage::store;
14
15 // Create a store
16 // let path = tempfile::TempDir::new()?;
17 // let mut store: ReadableWritableListableStorage =
18 // Arc::new(zarrs::filesystem::FilesystemStore::new(path.path())?);
19 // let mut store: ReadableWritableListableStorage = Arc::new(
20 // zarrs::filesystem::FilesystemStore::new("zarrs/tests/data/array_write_read.zarr")?,
21 // );
22 let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
23 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
24 && arg1 == "--usage-log"
25 {
26 let log_writer = Arc::new(std::sync::Mutex::new(
27 // std::io::BufWriter::new(
28 std::io::stdout(),
29 // )
30 ));
31 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
32 chrono::Utc::now().format("[%T%.3f] ").to_string()
33 }));
34 }
35
36 // Create the root group
37 zarrs::group::GroupBuilder::new()
38 .build(store.clone(), "/")?
39 .store_metadata()?;
40
41 // Create a group with attributes
42 let group_path = "/group";
43 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
44 group
45 .attributes_mut()
46 .insert("foo".into(), serde_json::Value::String("bar".into()));
47 group.store_metadata()?;
48
49 println!(
50 "The group metadata is:\n{}\n",
51 group.metadata().to_string_pretty()
52 );
53
54 // Create an array
55 let array_path = "/group/array";
56 let array = zarrs::array::ArrayBuilder::new(
57 vec![8, 8], // array shape
58 vec![4, 4], // regular chunk shape
59 data_type::float32(),
60 ZARR_NAN_F32,
61 )
62 // .bytes_to_bytes_codecs(vec![]) // uncompressed
63 .dimension_names(["y", "x"].into())
64 // .storage_transformers(vec![].into())
65 .build(store.clone(), array_path)?;
66
67 // Write array metadata to store
68 array.store_metadata()?;
69
70 println!(
71 "The array metadata is:\n{}\n",
72 array.metadata().to_string_pretty()
73 );
74
75 // Write some chunks
76 (0..2).into_par_iter().try_for_each(|i| {
77 let chunk_indices: Vec<u64> = vec![0, i];
78 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
79 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
80 })?;
81 array.store_chunk(
82 &chunk_indices,
83 ArrayD::<f32>::from_shape_vec(
84 chunk_subset.shape_usize(),
85 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
86 )
87 .unwrap(),
88 )
89 })?;
90
91 let subset_all = array.subset_all();
92 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
93 println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
94
95 // Store multiple chunks
96 let ndarray_chunks: Array2<f32> = array![
97 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
98 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
99 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
100 [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
101 ];
102 array.store_chunks(&[1..2, 0..2], ndarray_chunks)?;
103 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
104 println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
105
106 // Write a subset spanning multiple chunks, including updating chunks already written
107 let ndarray_subset: Array2<f32> =
108 array![[-3.3, -3.4, -3.5,], [-4.3, -4.4, -4.5,], [-5.3, -5.4, -5.5],];
109 array.store_array_subset(&[3..6, 3..6], ndarray_subset)?;
110 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
111 println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
112
113 // Store array subset
114 let ndarray_subset: Array2<f32> = array![
115 [-0.6],
116 [-1.6],
117 [-2.6],
118 [-3.6],
119 [-4.6],
120 [-5.6],
121 [-6.6],
122 [-7.6],
123 ];
124 array.store_array_subset(&[0..8, 6..7], ndarray_subset)?;
125 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
126 println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
127
128 // Store chunk subset
129 let ndarray_chunk_subset: Array2<f32> = array![[-7.4, -7.5, -7.6, -7.7],];
130 array.store_chunk_subset(
131 // chunk indices
132 &[1, 1],
133 // subset within chunk
134 &[3..4, 0..4],
135 ndarray_chunk_subset,
136 )?;
137 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
138 println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
139
140 // Erase a chunk
141 array.erase_chunk(&[0, 0])?;
142 let data_all: ArrayD<f32> = array.retrieve_array_subset(&subset_all)?;
143 println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");
144
145 // Read a chunk
146 let chunk_indices = vec![0, 1];
147 let data_chunk: ArrayD<f32> = array.retrieve_chunk(&chunk_indices)?;
148 println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
149
150 // Read chunks
151 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
152 let data_chunks: ArrayD<f32> = array.retrieve_chunks(&chunks)?;
153 println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
154
155 // Retrieve an array subset
156 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
157 let data_subset: ArrayD<f32> = array.retrieve_array_subset(&subset)?;
158 println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
159
160 // Show the hierarchy
161 let node = Node::open(&store, "/").unwrap();
162 let tree = node.hierarchy_tree();
163 println!("hierarchy_tree:\n{}", tree);
164
165 Ok(())
166 }

8 async fn async_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
9 use std::sync::Arc;
10
11 use futures::StreamExt;
12 use zarrs::array::{ArraySubset, ZARR_NAN_F32, data_type};
13 use zarrs::node::Node;
14
15 // Create a store
16 let mut store: AsyncReadableWritableListableStorage = Arc::new(
17 zarrs_object_store::AsyncObjectStore::new(object_store::memory::InMemory::new()),
18 );
19 if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1)
20 && arg1 == "--usage-log"
21 {
22 let log_writer = Arc::new(std::sync::Mutex::new(
23 // std::io::BufWriter::new(
24 std::io::stdout(),
25 // )
26 ));
27 store = Arc::new(UsageLogStorageAdapter::new(store, log_writer, || {
28 chrono::Utc::now().format("[%T%.3f] ").to_string()
29 }));
30 }
31
32 // Create the root group
33 zarrs::group::GroupBuilder::new()
34 .build(store.clone(), "/")?
35 .async_store_metadata()
36 .await?;
37
38 // Create a group with attributes
39 let group_path = "/group";
40 let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;
41 group
42 .attributes_mut()
43 .insert("foo".into(), serde_json::Value::String("bar".into()));
44 group.async_store_metadata().await?;
45
46 println!(
47 "The group metadata is:\n{}\n",
48 group.metadata().to_string_pretty()
49 );
50
51 // Create an array
52 let array_path = "/group/array";
53 let array = zarrs::array::ArrayBuilder::new(
54 vec![8, 8], // array shape
55 vec![4, 4], // regular chunk shape
56 data_type::float32(),
57 ZARR_NAN_F32,
58 )
59 // .bytes_to_bytes_codecs(vec![]) // uncompressed
60 .dimension_names(["y", "x"].into())
61 // .storage_transformers(vec![].into())
62 .build_arc(store.clone(), array_path)?;
63
64 // Write array metadata to store
65 array.async_store_metadata().await?;
66
67 println!(
68 "The array metadata is:\n{}\n",
69 array.metadata().to_string_pretty()
70 );
71
72 // Write some chunks
73 let store_chunk = |i: u64| {
74 let array = array.clone();
75 async move {
76 let chunk_indices: Vec<u64> = vec![0, i];
77 let chunk_subset = array.chunk_grid().subset(&chunk_indices)?.ok_or_else(|| {
78 zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
79 })?;
80 array
81 .async_store_chunk(
82 &chunk_indices,
83 vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
84 )
85 .await
86 }
87 };
88 futures::stream::iter(0..2)
89 .map(Ok)
90 .try_for_each_concurrent(None, store_chunk)
91 .await?;
92
93 let subset_all = array.subset_all();
94 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
95 println!("async_store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");
96
97 // Store multiple chunks
98 array
99 .async_store_chunks(
100 &[1..2, 0..2],
101 &[
102 //
103 1.0f32, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
104 //
105 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
106 ],
107 )
108 .await?;
109 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
110 println!("async_store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");
111
112 // Write a subset spanning multiple chunks, including updating chunks already written
113 array
114 .async_store_array_subset(
115 &[3..6, 3..6],
116 &[-3.3, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
117 )
118 .await?;
119 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
120 println!("async_store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");
121
122 // Store array subset
123 array
124 .async_store_array_subset(
125 &[0..8, 6..7],
126 &[-0.6f32, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
127 )
128 .await?;
129 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
130 println!("async_store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");
131
132 // Store chunk subset
133 array
134 .async_store_chunk_subset(
135 // chunk indices
136 &[1, 1],
137 // subset within chunk
138 &[3..4, 0..4],
139 &[-7.4f32, -7.5, -7.6, -7.7],
140 )
141 .await?;
142 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
143 println!("async_store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");
144
145 // Erase a chunk
146 array.async_erase_chunk(&[0, 0]).await?;
147 let data_all: ArrayD<f32> = array.async_retrieve_array_subset(&subset_all).await?;
148 println!("async_erase_chunk [0, 0]:\n{data_all:+4.1}\n");
149
150 // Read a chunk
151 let chunk_indices = vec![0, 1];
152 let data_chunk: ArrayD<f32> = array.async_retrieve_chunk(&chunk_indices).await?;
153 println!("async_retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");
154
155 // Read chunks
156 let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
157 let data_chunks: ArrayD<f32> = array.async_retrieve_chunks(&chunks).await?;
158 println!("async_retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");
159
160 // Retrieve an array subset
161 let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
162 let data_subset: ArrayD<f32> = array.async_retrieve_array_subset(&subset).await?;
163 println!("async_retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");
164
165 // Show the hierarchy
166 let node = Node::async_open(store, "/").await.unwrap();
167 let tree = node.hierarchy_tree();
168 println!("hierarchy_tree:\n{}", tree);
169
170 Ok(())
171 }

Source
pub fn consolidate_metadata(&self) -> Option<ConsolidatedMetadataMetadata>
pub fn consolidate_metadata(&self) -> Option<ConsolidatedMetadataMetadata>
Consolidate metadata. Returns None for an array.
ConsolidatedMetadataMetadata can be converted into ConsolidatedMetadata in GroupMetadataV3.
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Node
impl Send for Node
impl Sync for Node
impl RefUnwindSafe for Node
impl Unpin for Node
impl UnsafeUnpin for Node
impl UnwindSafe for Node
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> CloneToUninit for T
where
    T: Clone,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more