brimfile.file

  1import numpy as np
  2import warnings
  3
  4from .data import Data
  5from .metadata import Metadata
  6
  7from .utils import concatenate_paths
  8from .constants import brim_obj_names
  9from . import units
 10
 11from .file_abstraction import FileAbstraction, StoreType, sync
 12
 13# don't import _AbstractFile if running in pyodide (it is defined in js)
 14import sys
 15if "pyodide" not in sys.modules:
 16    from .file_abstraction import _AbstractFile
 17
 18__docformat__ = "google"
 19
 20class File:
 21    """
 22    Represents a brim file with Brillouin data, extending h5py.File.
 23    """
 24
 25    if "pyodide" in sys.modules:
 26        def __init__(self, file):
 27            self._file = file
 28            if not self.is_valid():
 29                raise ValueError("The brim file is not valid!")
 30    else:
 31        def __init__(self, filename: str, mode: str = 'r', store_type: StoreType = StoreType.AUTO):
 32            """
 33            Initialize the File object.
 34
 35            Args:
 36                filename (str): Path to the brim file.
 37                mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only).
 38                            See the definition of `mode` in `brimfile.file_abstraction._zarrFile.__init__()` for more details.
 39                            'r' means read only (must exist); 'r+' means read/write (must exist);
 40                            'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists).
 41                store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 42            """
 43            self._file = _AbstractFile(
 44                filename, mode=mode, store_type=store_type)
 45            if not self.is_valid():
 46                raise ValueError("The brim file is not valid!")
 47            
 48    def __del__(self):
 49        try:
 50            if hasattr(self, '_file'):
 51                self.close()
 52        except Exception as e:            
 53            # don't throw an error if the file cannot be closed
 54            warnings.warn(f"Cannot close the file: {e}")
 55
 56    def close(self) -> None:
 57        self._file.close()
 58
 59    def is_read_only(self) -> bool:
 60        return sync(self._file.is_read_only())
 61
 62    def is_valid(self) -> bool:
 63        """
 64        Check if the file is a valid brim file.
 65
 66        Returns:
 67            bool: True if the file is valid, False otherwise.
 68        """
 69        # TODO validate file against https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md
 70        return True
 71
 72    @classmethod
 73    def create(cls, filename: str, store_type: StoreType = StoreType.AUTO, brim_version: str = '0.1') -> 'File':
 74        """
 75        Create a new brim file with the specified filename. If the file exists already it will generate an error.
 76
 77        Args:
 78            filename (str): Path to the brim file to be created.
 79            store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 80            brim_version (str): Version of the brim file format to use. Default is '0.1'.
 81
 82        Returns:
 83            File: An instance of the File class representing the newly created brim file.
 84            store_type (str): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 85        """
 86        f = cls(filename, mode='w-', store_type=store_type)
 87
 88        # File version
 89        sync(f._file.create_attr('/', 'brim_version', brim_version))
 90
 91        # Root Brillouin_data group
 92        fr = sync(f._file.create_group(brim_obj_names.Brillouin_base_path))
 93
 94        return f
 95
 96    def create_data_group(self, PSD: np.ndarray, frequency: np.ndarray, px_size_um: tuple, *, index: int = None,
 97                          name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
 98        """
 99        Adds a new data entry to the file.
100        Parameters:
101            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions z, y, x, spectrum
102            frequency (np.ndarray): The frequency data corresponding to the PSD. It must be broadcastable to the PSD shape (the most common case is frequency being 1D, in which case the frequency axis is assumed the same for all the spatial coordinates)
103            px_size_um (tuple): A tuple of 3 elements, in the order z,y,x, corresponding to the pixel size in um. Unused dimensions can be set to None.
104            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
105            name (str, optional): The name for the new data group. Defaults to None.
106            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
107        Returns:
108            Data: The newly created Data object.
109        Raises:
110            IndexError: If the specified index already exists in the dataset.
111            ValueError: If any of the data provided is not valid or consistent
112        """
113        if PSD.ndim != 4:
114            raise ValueError(
115                "'PSD' must have 4 dimensions (z, y, x, spectrum)")
116        try:
117            np.broadcast_shapes(tuple(frequency.shape), tuple(PSD.shape))
118        except ValueError as e:
119            raise ValueError(f"frequency (shape: {frequency.shape}) is not broadcastable to PSD (shape: {PSD.shape}): {e}")
120        if len(px_size_um) != 3:
121            raise ValueError("'px_size_um' must have 3 elements (z,y,x); unused dimensions can be set to nan")
122
123        return self._create_data_group_raw(PSD, frequency, scanning = None, sparse = False, px_size_um=px_size_um, 
124                                             index=index, name=name, compression=compression)
125
126    def create_data_group_sparse(self, PSD: np.ndarray, frequency: np.ndarray, scanning: dict, *, timestamp: np.ndarray = None,
127                                index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
128        """
129        Adds a new [sparse data entry](https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md) to the file.
130        
131        Sparse data allows storage of spectra in a flattened format (first dimension is the spectrum index),
132        with spatial mapping provided separately. This is efficient for data with irregular sampling or missing pixels.
133        
134        Parameters:
135            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. First dimension is spectrum index,
136                last dimension contains the spectral data. Shape: (n_spectra, ..., n_freq_points).
137            frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
138            scanning (dict): Dictionary defining the spatial mapping. Must include at least 'Spatial_map' or 'Cartesian_visualisation'.
139                See `brimfile.data.Data._add_data` docstring for detailed structure of the scanning dictionary.
140            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
141            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
142            name (str, optional): The name for the new data group. Defaults to None.
143            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
144        Returns:
145            Data: The newly created Data object.
146        Raises:
147            IndexError: If the specified index already exists in the dataset.
148            ValueError: If any of the data provided is not valid or consistent
149        """
150        return self._create_data_group_raw(PSD, frequency, scanning=scanning, timestamp=timestamp, sparse=True, index=index, name=name, compression=compression)   
151    
152    def _create_data_group_raw(self, PSD: np.ndarray, frequency: np.ndarray, *, scanning: dict = None, px_size_um = None, timestamp: np.ndarray = None, sparse: bool = False,
153                                index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
154        """
155        Adds a new data entry to the file. Check the documentation for `brimfile.data.Data._add_data` for more details on the parameters.
156        Parameters:
157            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. The last dimension contains the spectra.
158            frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
159            scanning (dict, optional): Spatial mapping metadata. Required for sparse=True, optional for sparse=False.
160                See `brimfile.data.Data._add_data` docstring for detailed structure.
161            px_size_um (tuple, optional): A tuple of 3 elements (z, y, x) for pixel size in μm. For non-sparse data only.
162            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
163            sparse (bool): Whether the data is sparse. See https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md for details. Defaults to False.
164            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
165            name (str, optional): The name for the new data group. Defaults to None.
166            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
167        Returns:
168            Data: The newly created Data object.
169        Raises:
170            IndexError: If the specified index already exists in the dataset.
171            ValueError: If any of the data provided is not valid or consistent
172        """
173        if index is not None:
174            if Data._get_existing_group_name(self._file, index) is not None:
175                raise IndexError(
176                    f"Data {index} already exists in {self._file.filename}")
177        else:
178            data_groups = self.list_data_groups()
179            indices = [dg['index'] for dg in data_groups]
180            indices.sort()
181            index = indices[-1] + 1 if indices else 0  # Next available index
182
183        # create the data group
184        d = Data._create_new(self._file, index, sparse, name)
185        # add the pixel size as an attribute of the data group
186        if px_size_um is not None:
187            sync(self._file.create_attr(d._group, 'element_size', tuple(px_size_um)))
188            units.add_to_attribute(self._file, d._group, 'element_size', 'um')
189        elif not sparse:
190            warnings.warn("Pixel size is not provided for non-sparse data. It is recommended to provide it for proper spatial calibration and visualization.")
191        # add the data to the data group
192        d._add_data(PSD, frequency, scanning = scanning,
193                   timestamp=timestamp, compression=compression)
194        return d
195
196    def list_data_groups(self, retrieve_custom_name=False) -> list:
197        """
198        List all data groups in the brim file.
199
200        Returns:
201            See documentation of brimfile.data.Data.list_data_groups
202        """
203        return Data.list_data_groups(self._file, retrieve_custom_name)
204
205    def get_data(self, index: int = 0) -> 'Data':
206        """
207        Retrieve a Data object for the specified index.
208
209        Args:
210            index (int): The index of the data group to retrieve.
211
212        Returns:
213            Data: The Data object corresponding to the specified index.
214        """
215        group_name: str = Data._get_existing_group_name(self._file, index)
216        if group_name is None:
217            raise IndexError(f"Data {index} not found")
218        data = Data(self._file, concatenate_paths(
219            brim_obj_names.Brillouin_base_path, group_name))
220        return data
221
222    @property
223    def filename(self) -> str:
224        """
225        Get the filename of the brim file.
226
227        Returns:
228            str: The filename of the brim file.
229        """
230        return self._file.filename
class File:
 21class File:
 22    """
 23    Represents a brim file with Brillouin data, extending h5py.File.
 24    """
 25
 26    if "pyodide" in sys.modules:
 27        def __init__(self, file):
 28            self._file = file
 29            if not self.is_valid():
 30                raise ValueError("The brim file is not valid!")
 31    else:
 32        def __init__(self, filename: str, mode: str = 'r', store_type: StoreType = StoreType.AUTO):
 33            """
 34            Initialize the File object.
 35
 36            Args:
 37                filename (str): Path to the brim file.
 38                mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only).
 39                            See the definition of `mode` in `brimfile.file_abstraction._zarrFile.__init__()` for more details.
 40                            'r' means read only (must exist); 'r+' means read/write (must exist);
 41                            'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists).
 42                store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 43            """
 44            self._file = _AbstractFile(
 45                filename, mode=mode, store_type=store_type)
 46            if not self.is_valid():
 47                raise ValueError("The brim file is not valid!")
 48            
 49    def __del__(self):
 50        try:
 51            if hasattr(self, '_file'):
 52                self.close()
 53        except Exception as e:            
 54            # don't throw an error if the file cannot be closed
 55            warnings.warn(f"Cannot close the file: {e}")
 56
 57    def close(self) -> None:
 58        self._file.close()
 59
 60    def is_read_only(self) -> bool:
 61        return sync(self._file.is_read_only())
 62
 63    def is_valid(self) -> bool:
 64        """
 65        Check if the file is a valid brim file.
 66
 67        Returns:
 68            bool: True if the file is valid, False otherwise.
 69        """
 70        # TODO validate file against https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md
 71        return True
 72
 73    @classmethod
 74    def create(cls, filename: str, store_type: StoreType = StoreType.AUTO, brim_version: str = '0.1') -> 'File':
 75        """
 76        Create a new brim file with the specified filename. If the file exists already it will generate an error.
 77
 78        Args:
 79            filename (str): Path to the brim file to be created.
 80            store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 81            brim_version (str): Version of the brim file format to use. Default is '0.1'.
 82
 83        Returns:
 84            File: An instance of the File class representing the newly created brim file.
 85            store_type (str): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
 86        """
 87        f = cls(filename, mode='w-', store_type=store_type)
 88
 89        # File version
 90        sync(f._file.create_attr('/', 'brim_version', brim_version))
 91
 92        # Root Brillouin_data group
 93        fr = sync(f._file.create_group(brim_obj_names.Brillouin_base_path))
 94
 95        return f
 96
 97    def create_data_group(self, PSD: np.ndarray, frequency: np.ndarray, px_size_um: tuple, *, index: int = None,
 98                          name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
 99        """
100        Adds a new data entry to the file.
101        Parameters:
102            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions z, y, x, spectrum
103            frequency (np.ndarray): The frequency data corresponding to the PSD. It must be broadcastable to the PSD shape (the most common case is frequency being 1D, in which case the frequency axis is assumed the same for all the spatial coordinates)
104            px_size_um (tuple): A tuple of 3 elements, in the order z,y,x, corresponding to the pixel size in um. Unused dimensions can be set to None.
105            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
106            name (str, optional): The name for the new data group. Defaults to None.
107            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
108        Returns:
109            Data: The newly created Data object.
110        Raises:
111            IndexError: If the specified index already exists in the dataset.
112            ValueError: If any of the data provided is not valid or consistent
113        """
114        if PSD.ndim != 4:
115            raise ValueError(
116                "'PSD' must have 4 dimensions (z, y, x, spectrum)")
117        try:
118            np.broadcast_shapes(tuple(frequency.shape), tuple(PSD.shape))
119        except ValueError as e:
120            raise ValueError(f"frequency (shape: {frequency.shape}) is not broadcastable to PSD (shape: {PSD.shape}): {e}")
121        if len(px_size_um) != 3:
122            raise ValueError("'px_size_um' must have 3 elements (z,y,x); unused dimensions can be set to nan")
123
124        return self._create_data_group_raw(PSD, frequency, scanning = None, sparse = False, px_size_um=px_size_um, 
125                                             index=index, name=name, compression=compression)
126
127    def create_data_group_sparse(self, PSD: np.ndarray, frequency: np.ndarray, scanning: dict, *, timestamp: np.ndarray = None,
128                                index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
129        """
130        Adds a new [sparse data entry](https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md) to the file.
131        
132        Sparse data allows storage of spectra in a flattened format (first dimension is the spectrum index),
133        with spatial mapping provided separately. This is efficient for data with irregular sampling or missing pixels.
134        
135        Parameters:
136            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. First dimension is spectrum index,
137                last dimension contains the spectral data. Shape: (n_spectra, ..., n_freq_points).
138            frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
139            scanning (dict): Dictionary defining the spatial mapping. Must include at least 'Spatial_map' or 'Cartesian_visualisation'.
140                See `brimfile.data.Data._add_data` docstring for detailed structure of the scanning dictionary.
141            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
142            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
143            name (str, optional): The name for the new data group. Defaults to None.
144            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
145        Returns:
146            Data: The newly created Data object.
147        Raises:
148            IndexError: If the specified index already exists in the dataset.
149            ValueError: If any of the data provided is not valid or consistent
150        """
151        return self._create_data_group_raw(PSD, frequency, scanning=scanning, timestamp=timestamp, sparse=True, index=index, name=name, compression=compression)   
152    
153    def _create_data_group_raw(self, PSD: np.ndarray, frequency: np.ndarray, *, scanning: dict = None, px_size_um = None, timestamp: np.ndarray = None, sparse: bool = False,
154                                index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
155        """
156        Adds a new data entry to the file. Check the documentation for `brimfile.data.Data._add_data` for more details on the parameters.
157        Parameters:
158            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. The last dimension contains the spectra.
159            frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
160            scanning (dict, optional): Spatial mapping metadata. Required for sparse=True, optional for sparse=False.
161                See `brimfile.data.Data._add_data` docstring for detailed structure.
162            px_size_um (tuple, optional): A tuple of 3 elements (z, y, x) for pixel size in μm. For non-sparse data only.
163            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
164            sparse (bool): Whether the data is sparse. See https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md for details. Defaults to False.
165            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
166            name (str, optional): The name for the new data group. Defaults to None.
167            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
168        Returns:
169            Data: The newly created Data object.
170        Raises:
171            IndexError: If the specified index already exists in the dataset.
172            ValueError: If any of the data provided is not valid or consistent
173        """
174        if index is not None:
175            if Data._get_existing_group_name(self._file, index) is not None:
176                raise IndexError(
177                    f"Data {index} already exists in {self._file.filename}")
178        else:
179            data_groups = self.list_data_groups()
180            indices = [dg['index'] for dg in data_groups]
181            indices.sort()
182            index = indices[-1] + 1 if indices else 0  # Next available index
183
184        # create the data group
185        d = Data._create_new(self._file, index, sparse, name)
186        # add the pixel size as an attribute of the data group
187        if px_size_um is not None:
188            sync(self._file.create_attr(d._group, 'element_size', tuple(px_size_um)))
189            units.add_to_attribute(self._file, d._group, 'element_size', 'um')
190        elif not sparse:
191            warnings.warn("Pixel size is not provided for non-sparse data. It is recommended to provide it for proper spatial calibration and visualization.")
192        # add the data to the data group
193        d._add_data(PSD, frequency, scanning = scanning,
194                   timestamp=timestamp, compression=compression)
195        return d
196
197    def list_data_groups(self, retrieve_custom_name=False) -> list:
198        """
199        List all data groups in the brim file.
200
201        Returns:
202            See documentation of brimfile.data.Data.list_data_groups
203        """
204        return Data.list_data_groups(self._file, retrieve_custom_name)
205
206    def get_data(self, index: int = 0) -> 'Data':
207        """
208        Retrieve a Data object for the specified index.
209
210        Args:
211            index (int): The index of the data group to retrieve.
212
213        Returns:
214            Data: The Data object corresponding to the specified index.
215        """
216        group_name: str = Data._get_existing_group_name(self._file, index)
217        if group_name is None:
218            raise IndexError(f"Data {index} not found")
219        data = Data(self._file, concatenate_paths(
220            brim_obj_names.Brillouin_base_path, group_name))
221        return data
222
223    @property
224    def filename(self) -> str:
225        """
226        Get the filename of the brim file.
227
228        Returns:
229            str: The filename of the brim file.
230        """
231        return self._file.filename

Represents a brim file with Brillouin data, wrapping an abstract file backend.

File( filename: str, mode: str = 'r', store_type: brimfile.file_abstraction.StoreType = <StoreType.AUTO: 'auto'>)
32        def __init__(self, filename: str, mode: str = 'r', store_type: StoreType = StoreType.AUTO):
33            """
34            Initialize the File object.
35
36            Args:
37                filename (str): Path to the brim file.
38                mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only).
39                            See the definition of `mode` in `brimfile.file_abstraction._zarrFile.__init__()` for more details.
40                            'r' means read only (must exist); 'r+' means read/write (must exist);
41                            'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists).
42                store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
43            """
44            self._file = _AbstractFile(
45                filename, mode=mode, store_type=store_type)
46            if not self.is_valid():
47                raise ValueError("The brim file is not valid!")

Initialize the File object.

Arguments:
  • filename (str): Path to the brim file.
  • mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only). See the definition of mode in brimfile.file_abstraction._zarrFile.__init__() for more details. 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists).
  • store_type (StoreType): Type of the store to use, as defined in brimfile.file_abstraction.StoreType. Default is 'AUTO'.
def close(self) -> None:
57    def close(self) -> None:
58        self._file.close()
def is_read_only(self) -> bool:
60    def is_read_only(self) -> bool:
61        return sync(self._file.is_read_only())
def is_valid(self) -> bool:
63    def is_valid(self) -> bool:
64        """
65        Check if the file is a valid brim file.
66
67        Returns:
68            bool: True if the file is valid, False otherwise.
69        """
70        # TODO validate file against https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md
71        return True

Check if the file is a valid brim file.

Returns:

bool: True if the file is valid, False otherwise.

@classmethod
def create( cls, filename: str, store_type: brimfile.file_abstraction.StoreType = <StoreType.AUTO: 'auto'>, brim_version: str = '0.1') -> File:
73    @classmethod
74    def create(cls, filename: str, store_type: StoreType = StoreType.AUTO, brim_version: str = '0.1') -> 'File':
75        """
76        Create a new brim file with the specified filename. If the file exists already it will generate an error.
77
78        Args:
79            filename (str): Path to the brim file to be created.
80            store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
81            brim_version (str): Version of the brim file format to use. Default is '0.1'.
82
83        Returns:
84            File: An instance of the File class representing the newly created brim file.
85            store_type (str): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
86        """
87        f = cls(filename, mode='w-', store_type=store_type)
88
89        # File version
90        sync(f._file.create_attr('/', 'brim_version', brim_version))
91
92        # Root Brillouin_data group
93        fr = sync(f._file.create_group(brim_obj_names.Brillouin_base_path))
94
95        return f

Create a new brim file with the specified filename. If the file exists already it will generate an error.

Arguments:
  • filename (str): Path to the brim file to be created.
  • store_type (StoreType): Type of the store to use, as defined in brimfile.file_abstraction.StoreType. Default is 'AUTO'.
  • brim_version (str): Version of the brim file format to use. Default is '0.1'.
Returns:

File: An instance of the File class representing the newly created brim file.

def create_data_group( self, PSD: numpy.ndarray, frequency: numpy.ndarray, px_size_um: tuple, *, index: int = None, name: str = None, compression: brimfile.file_abstraction.FileAbstraction.Compression = <brimfile.file_abstraction.FileAbstraction.Compression object>) -> brimfile.data.Data:
 97    def create_data_group(self, PSD: np.ndarray, frequency: np.ndarray, px_size_um: tuple, *, index: int = None,
 98                          name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
 99        """
100        Adds a new data entry to the file.
101        Parameters:
102            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions z, y, x, spectrum
103            frequency (np.ndarray): The frequency data corresponding to the PSD. It must be broadcastable to the PSD shape (the most common case is frequency being 1D, in which case the frequency axis is assumed the same for all the spatial coordinates)
104            px_size_um (tuple): A tuple of 3 elements, in the order z,y,x, corresponding to the pixel size in um. Unused dimensions can be set to None.
105            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
106            name (str, optional): The name for the new data group. Defaults to None.
107            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
108        Returns:
109            Data: The newly created Data object.
110        Raises:
111            IndexError: If the specified index already exists in the dataset.
112            ValueError: If any of the data provided is not valid or consistent
113        """
114        if PSD.ndim != 4:
115            raise ValueError(
116                "'PSD' must have 4 dimensions (z, y, x, spectrum)")
117        try:
118            np.broadcast_shapes(tuple(frequency.shape), tuple(PSD.shape))
119        except ValueError as e:
120            raise ValueError(f"frequency (shape: {frequency.shape}) is not broadcastable to PSD (shape: {PSD.shape}): {e}")
121        if len(px_size_um) != 3:
122            raise ValueError("'px_size_um' must have 3 elements (z,y,x); unused dimensions can be set to nan")
123
124        return self._create_data_group_raw(PSD, frequency, scanning = None, sparse = False, px_size_um=px_size_um, 
125                                             index=index, name=name, compression=compression)

Adds a new data entry to the file.

Arguments:
  • PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions z, y, x, spectrum
  • frequency (np.ndarray): The frequency data corresponding to the PSD. It must be broadcastable to the PSD shape (the most common case is frequency being 1D, in which case the frequency axis is assumed the same for all the spatial coordinates)
  • px_size_um (tuple): A tuple of 3 elements, in the order z,y,x, corresponding to the pixel size in um. Unused dimensions can be set to nan.
  • index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
  • name (str, optional): The name for the new data group. Defaults to None.
  • compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
Returns:

Data: The newly created Data object.

Raises:
  • IndexError: If the specified index already exists in the dataset.
  • ValueError: If any of the data provided is not valid or consistent
def create_data_group_sparse( self, PSD: numpy.ndarray, frequency: numpy.ndarray, scanning: dict, *, timestamp: numpy.ndarray = None, index: int = None, name: str = None, compression: brimfile.file_abstraction.FileAbstraction.Compression = <brimfile.file_abstraction.FileAbstraction.Compression object>) -> brimfile.data.Data:
127    def create_data_group_sparse(self, PSD: np.ndarray, frequency: np.ndarray, scanning: dict, *, timestamp: np.ndarray = None,
128                                index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
129        """
130        Adds a new [sparse data entry](https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md) to the file.
131        
132        Sparse data allows storage of spectra in a flattened format (first dimension is the spectrum index),
133        with spatial mapping provided separately. This is efficient for data with irregular sampling or missing pixels.
134        
135        Parameters:
136            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. First dimension is spectrum index,
137                last dimension contains the spectral data. Shape: (n_spectra, ..., n_freq_points).
138            frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
139            scanning (dict): Dictionary defining the spatial mapping. Must include at least 'Spatial_map' or 'Cartesian_visualisation'.
140                See `brimfile.data.Data._add_data` docstring for detailed structure of the scanning dictionary.
141            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
142            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
143            name (str, optional): The name for the new data group. Defaults to None.
144            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
145        Returns:
146            Data: The newly created Data object.
147        Raises:
148            IndexError: If the specified index already exists in the dataset.
149            ValueError: If any of the data provided is not valid or consistent
150        """
151        return self._create_data_group_raw(PSD, frequency, scanning=scanning, timestamp=timestamp, sparse=True, index=index, name=name, compression=compression)   

Adds a new sparse data entry to the file.

Sparse data allows storage of spectra in a flattened format (first dimension is the spectrum index), with spatial mapping provided separately. This is efficient for data with irregular sampling or missing pixels.

Arguments:
  • PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. First dimension is spectrum index, last dimension contains the spectral data. Shape: (n_spectra, ..., n_freq_points).
  • frequency (np.ndarray): The frequency data corresponding to the PSD. Must be broadcastable to the PSD array.
  • scanning (dict): Dictionary defining the spatial mapping. Must include at least 'Spatial_map' or 'Cartesian_visualisation'. See brimfile.data.Data._add_data docstring for detailed structure of the scanning dictionary.
  • timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
  • index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
  • name (str, optional): The name for the new data group. Defaults to None.
  • compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
Returns:

Data: The newly created Data object.

Raises:
  • IndexError: If the specified index already exists in the dataset.
  • ValueError: If any of the data provided is not valid or consistent
def list_data_groups(self, retrieve_custom_name=False) -> list:
197    def list_data_groups(self, retrieve_custom_name=False) -> list:
198        """
199        List all data groups in the brim file.
200
201        Returns:
202            See documentation of brimfile.data.Data.list_data_groups
203        """
204        return Data.list_data_groups(self._file, retrieve_custom_name)

List all data groups in the brim file.

Returns:

See documentation of brimfile.data.Data.list_data_groups

def get_data(self, index: int = 0) -> brimfile.data.Data:
206    def get_data(self, index: int = 0) -> 'Data':
207        """
208        Retrieve a Data object for the specified index.
209
210        Args:
211            index (int): The index of the data group to retrieve.
212
213        Returns:
214            Data: The Data object corresponding to the specified index.
215        """
216        group_name: str = Data._get_existing_group_name(self._file, index)
217        if group_name is None:
218            raise IndexError(f"Data {index} not found")
219        data = Data(self._file, concatenate_paths(
220            brim_obj_names.Brillouin_base_path, group_name))
221        return data

Retrieve a Data object for the specified index.

Arguments:
  • index (int): The index of the data group to retrieve.
Returns:

Data: The Data object corresponding to the specified index.

filename: str
223    @property
224    def filename(self) -> str:
225        """
226        Get the filename of the brim file.
227
228        Returns:
229            str: The filename of the brim file.
230        """
231        return self._file.filename

Get the filename of the brim file.

Returns:

str: The filename of the brim file.