brimfile.file

import numpy as np
import warnings

from .data import Data
from .metadata import Metadata

from .utils import concatenate_paths
from .constants import brim_obj_names

from .file_abstraction import FileAbstraction, StoreType, sync

# don't import _AbstractFile if running in pyodide (it is defined in js)
import sys
if "pyodide" not in sys.modules:
    from .file_abstraction import _AbstractFile

__docformat__ = "google"

class File:
    """
    Represents a brim file containing Brillouin data, backed by the store abstraction in `brimfile.file_abstraction`.
    """

    if "pyodide" in sys.modules:
        def __init__(self, file):
            self._file = file
            if not self.is_valid():
                raise ValueError("The brim file is not valid!")
    else:
        def __init__(self, filename: str, mode: str = 'r', store_type: StoreType = StoreType.AUTO):
            """
            Initialize the File object.

            Args:
                filename (str): Path to the brim file.
                mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only).
                    See the definition of `mode` in `brimfile.file_abstraction._zarrFile.__init__()` for more details.
                    'r' means read-only (must exist); 'r+' means read/write (must exist);
                    'a' means read/write (create if it doesn't exist); 'w' means create (overwrite if it exists); 'w-' means create (fail if it exists).
                store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
            """
            self._file = _AbstractFile(
                filename, mode=mode, store_type=store_type)
            if not self.is_valid():
                raise ValueError("The brim file is not valid!")

    def __del__(self):
        try:
            self.close()
        except Exception as e:
            # don't throw an error if the file cannot be closed
            warnings.warn(f"Cannot close the file: {e}")

    def close(self) -> None:
        self._file.close()

    def is_read_only(self) -> bool:
        return sync(self._file.is_read_only())

    def is_valid(self) -> bool:
        """
        Check if the file is a valid brim file.

        Returns:
            bool: True if the file is valid, False otherwise.
        """
        # TODO validate file against https://github.com/prevedel-lab/Brillouin-standard-file/blob/main/docs/brim_file_specs.md
        return True

    @classmethod
    def create(cls, filename: str, store_type: StoreType = StoreType.AUTO, brim_version: str = '0.1') -> 'File':
        """
        Create a new brim file with the specified filename. If the file already exists, an error is raised.

        Args:
            filename (str): Path to the brim file to be created.
            store_type (StoreType): Type of the store to use, as defined in `brimfile.file_abstraction.StoreType`. Default is 'AUTO'.
            brim_version (str): Version of the brim file format to use. Default is '0.1'.

        Returns:
            File: An instance of the File class representing the newly created brim file.
        """
        f = cls(filename, mode='w-', store_type=store_type)

        # File version and SubType
        sync(f._file.create_attr('/', 'brim_version', brim_version))
        sync(f._file.create_attr('/', 'SubTypeID', 0))  # Default subtype

        # Root Brillouin_data group
        sync(f._file.create_group(brim_obj_names.Brillouin_base_path))

        # Create the metadata group
        Metadata._create_group_in_file(f._file)

        return f

    def create_data_group(self, PSD: np.ndarray, frequency: np.ndarray, px_size_um: tuple, index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
        """
        Adds a new data entry to the file.

        Args:
            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions (z, y, x, spectrum).
            frequency (np.ndarray): The frequency data corresponding to the PSD. It must be 4D or 1D (in which case the frequency axis is assumed to be the same for all spatial coordinates).
            px_size_um (tuple): A tuple of 3 elements, in the order (z, y, x), corresponding to the pixel size in um. Unused dimensions can be set to None.
            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
            name (str, optional): The name for the new data group. Defaults to None.
            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.

        Returns:
            Data: The newly created Data object.

        Raises:
            IndexError: If the specified index already exists in the dataset.
            ValueError: If any of the data provided is not valid or consistent.
        """
        if PSD.ndim != 4:
            raise ValueError(
                "'PSD' must have 4 dimensions (z, y, x, spectrum)")
        if frequency.ndim != 1 and frequency.ndim != 4:
            raise ValueError(
                "'frequency' must have either 4 dimensions (z, y, x, spectrum) or 1 dimension (spectrum)")
        if len(px_size_um) != 3:
            raise ValueError("'px_size_um' must have 3 elements (z,y,x); unused dimensions can be set to nan")

        PSD_flat = np.reshape(PSD, (-1, PSD.shape[3]))
        if frequency.ndim == 4:
            freq_flat = np.reshape(frequency, (-1, frequency.shape[3]))
        else:
            freq_flat = frequency
        indices = np.arange(PSD_flat.shape[0])
        cartesian_vis = np.reshape(indices, PSD.shape[0:3])
        scanning = {'Cartesian_visualisation': cartesian_vis,
                    'Cartesian_visualisation_pixel': px_size_um, 'Cartesian_visualisation_pixel_unit': 'um'}

        return self.create_data_group_raw(PSD_flat, freq_flat, scanning, index=index, name=name, compression=compression)

    def create_data_group_raw(self, PSD: np.ndarray, frequency: np.ndarray, scanning: dict, timestamp: np.ndarray = None, index: int = None, name: str = None, compression: FileAbstraction.Compression = FileAbstraction.Compression()) -> 'Data':
        """
        Adds a new data entry to the file. Check the documentation for `brimfile.data.Data.add_data` for more details on the parameters.

        Args:
            PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. The last dimension contains the spectra.
            frequency (np.ndarray): The frequency data corresponding to the PSD.
            scanning (dict): Metadata related to the scanning process. See Data.add_data for more details.
            timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
            index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
            name (str, optional): The name for the new data group. Defaults to None.
            compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.

        Returns:
            Data: The newly created Data object.

        Raises:
            IndexError: If the specified index already exists in the dataset.
            ValueError: If any of the data provided is not valid or consistent.
        """
        if index is not None:
            if Data._get_existing_group_name(self._file, index) is not None:
                raise IndexError(
                    f"Data {index} already exists in {self._file.filename}")
        else:
            data_groups = self.list_data_groups()
            indices = [dg['index'] for dg in data_groups]
            indices.sort()
            index = indices[-1] + 1 if indices else 0  # Next available index

        d = Data._create_new(self._file, index, name)
        d.add_data(PSD, frequency, scanning,
                   timestamp=timestamp, compression=compression)
        return d

    def list_data_groups(self, retrieve_custom_name=False) -> list:
        """
        List all data groups in the brim file.

        Returns:
            See documentation of brimfile.data.Data.list_data_groups
        """
        return Data.list_data_groups(self._file, retrieve_custom_name)

    def get_data(self, index: int = 0) -> 'Data':
        """
        Retrieve a Data object for the specified index.

        Args:
            index (int): The index of the data group to retrieve.

        Returns:
            Data: The Data object corresponding to the specified index.
        """
        group_name: str = Data._get_existing_group_name(self._file, index)
        if group_name is None:
            raise IndexError(f"Data {index} not found")
        data = Data(self._file, concatenate_paths(
            brim_obj_names.Brillouin_base_path, group_name))
        return data

    @property
    def filename(self) -> str:
        """
        Get the filename of the brim file.

        Returns:
            str: The filename of the brim file.
        """
        return self._file.filename
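
A minimal end-to-end sketch of how the API above fits together (the file name is hypothetical, and it is assumed here that the package is importable as brimfile.file):

import numpy as np
from brimfile.file import File

# create a new brim file (fails if it already exists, since 'w-' is used internally)
f = File.create("example.brim")

# add one Cartesian dataset: PSD is (z, y, x, spectrum), frequency is a shared 1D axis
PSD = np.random.rand(2, 4, 5, 128)
frequency = np.linspace(-10.0, 10.0, 128)
f.create_data_group(PSD, frequency, px_size_um=(2.0, 0.5, 0.5), name="scan 1")
f.close()

# reopen read-only and inspect the stored data groups
f = File("example.brim")
for dg in f.list_data_groups():
    data = f.get_data(dg['index'])
f.close()
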
class File:

Represents a brim file containing Brillouin data, backed by the store abstraction in brimfile.file_abstraction.

File(filename: str, mode: str = 'r', store_type: brimfile.file_abstraction.StoreType = StoreType.AUTO)

Initialize the File object.

Arguments:
  • filename (str): Path to the brim file.
  • mode: {'r', 'r+', 'a', 'w', 'w-'} the mode for opening the file (default is 'r' for read-only). See the definition of mode in brimfile.file_abstraction._zarrFile.__init__() for more details. 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists).
  • store_type (StoreType): Type of the store to use, as defined in brimfile.file_abstraction.StoreType. Default is 'AUTO'.
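
A short sketch of opening an existing brim file (the path is hypothetical; brimfile.file is assumed to be importable):

from brimfile.file import File

f = File("measurement.brim")    # mode='r' by default: read-only, the file must exist
print(f.is_read_only())         # True when opened with 'r'; use mode='r+' or 'a' to open for writing
f.close()
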
def close(self) -> None:
def is_read_only(self) -> bool:
def is_valid(self) -> bool:

Check if the file is a valid brim file.

Returns:

bool: True if the file is valid, False otherwise.

@classmethod
def create(cls, filename: str, store_type: brimfile.file_abstraction.StoreType = StoreType.AUTO, brim_version: str = '0.1') -> File:

Create a new brim file with the specified filename. If the file already exists, an error is raised.

Arguments:
  • filename (str): Path to the brim file to be created.
  • store_type (StoreType): Type of the store to use, as defined in brimfile.file_abstraction.StoreType. Default is 'AUTO'.
  • brim_version (str): Version of the brim file format to use. Default is '0.1'.
Returns:

File: An instance of the File class representing the newly created brim file.
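
A minimal sketch of creating a new, empty brim file (hypothetical path; store_type is left at its AUTO default):

from brimfile.file import File

f = File.create("new_measurement.brim")   # uses mode 'w-' internally, so it fails if the file already exists
f.close()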

def create_data_group(self, PSD: numpy.ndarray, frequency: numpy.ndarray, px_size_um: tuple, index: int = None, name: str = None, compression: brimfile.file_abstraction.FileAbstraction.Compression = FileAbstraction.Compression()) -> brimfile.data.Data:

Adds a new data entry to the file.

Arguments:
  • PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. It must be 4D with dimensions z, y, x, spectrum
  • frequency (np.ndarray): The frequency data corresponding to the PSD. It must be 4D or 1D (in which case the frequency axis is assumed the same for all the spatial coordinates)
  • px_size_um (tuple): A tuple of 3 elements, in the order z,y,x, corresponding to the pixel size in um. Unused dimensions can be set to None.
  • index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
  • name (str, optional): The name for the new data group. Defaults to None.
  • compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
Returns:

Data: The newly created Data object.

Raises:
  • IndexError: If the specified index already exists in the dataset.
  • ValueError: If any of the data provided is not valid or consistent.
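
A sketch of adding a Cartesian dataset with synthetic data (array sizes are arbitrary and only for illustration; the file path is hypothetical):

import numpy as np
from brimfile.file import File

f = File.create("example_cartesian.brim")

PSD = np.random.rand(2, 4, 5, 128)          # (z, y, x, spectrum)
frequency = np.linspace(-10.0, 10.0, 128)   # 1D axis shared by all spatial positions
px_size_um = (2.0, 0.5, 0.5)                # pixel size in um, ordered (z, y, x)

data = f.create_data_group(PSD, frequency, px_size_um, name="scan 1")
f.close()
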
def create_data_group_raw(self, PSD: numpy.ndarray, frequency: numpy.ndarray, scanning: dict, timestamp: numpy.ndarray = None, index: int = None, name: str = None, compression: brimfile.file_abstraction.FileAbstraction.Compression = FileAbstraction.Compression()) -> brimfile.data.Data:

Adds a new data entry to the file. Check the documentation for brimfile.data.Data.add_data for more details on the parameters.

Arguments:
  • PSD (np.ndarray): The Power Spectral Density (PSD) data to be added. The last dimension contains the spectra.
  • frequency (np.ndarray): The frequency data corresponding to the PSD.
  • scanning (dict): Metadata related to the scanning process. See Data.add_data for more details.
  • timestamp (np.ndarray, optional): Timestamps in milliseconds for the data. Defaults to None.
  • index (int, optional): The index for the new data group. If None, the next available index is used. Defaults to None.
  • name (str, optional): The name for the new data group. Defaults to None.
  • compression (FileAbstraction.Compression, optional): The compression method to use for the data. Defaults to FileAbstraction.Compression.DEFAULT.
Returns:

Data: The newly created Data object.

Raises:
  • IndexError: If the specified index already exists in the dataset.
  • ValueError: If any of the data provided is not valid or consistent.
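
A sketch of the raw variant, where spectra are stored as a flat list of points and the mapping back to the scan geometry is passed through the scanning dictionary (the keys below are the Cartesian_visualisation ones used internally by create_data_group; the file path is hypothetical):

import numpy as np
from brimfile.file import File

f = File.create("example_raw.brim")

n_points, n_spectral = 3 * 4 * 5, 256
PSD = np.random.rand(n_points, n_spectral)        # one spectrum per scanned point
frequency = np.linspace(-15.0, 15.0, n_spectral)  # frequency axis shared by all points

scanning = {
    'Cartesian_visualisation': np.arange(n_points).reshape(3, 4, 5),  # point index for each (z, y, x) voxel
    'Cartesian_visualisation_pixel': (1.0, 0.5, 0.5),                 # pixel size, ordered (z, y, x)
    'Cartesian_visualisation_pixel_unit': 'um',
}

data = f.create_data_group_raw(PSD, frequency, scanning, name="raw scan")
f.close()
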
def list_data_groups(self, retrieve_custom_name=False) -> list:

List all data groups in the brim file.

Returns:

See documentation of brimfile.data.Data.list_data_groups
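
A small sketch of listing the data groups of an already-opened File f (each entry is a dict; the 'index' key is the one used by get_data below, other keys follow brimfile.data.Data.list_data_groups):

for dg in f.list_data_groups():
    print(dg['index'])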

def get_data(self, index: int = 0) -> brimfile.data.Data:

Retrieve a Data object for the specified index.

Arguments:
  • index (int): The index of the data group to retrieve.
Returns:

Data: The Data object corresponding to the specified index.
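
A small sketch of retrieving a data group from an already-opened File f (the returned Data object is documented in brimfile.data.Data):

try:
    data = f.get_data(index=0)
except IndexError:
    print("no data group with index 0")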

filename: str

Get the filename of the brim file.

Returns:

str: The filename of the brim file.