1"""
2Routines for reading/writing PSI style HDF5 and HDF4 data files.
3
4Written by Ronald M. Caplan, Ryder Davidson, & Cooper Downs.
5
62023/09: Start with SVN version r454, 2023/09/12 by RC, Predictive Science Inc.
7
82024/06: CD: add the get_scales subroutines.
9
102024/11: RD: Major Update: Add several generic data loading capabilities for faster IO.
11 - Read only the portions of data required (`read_hdf_by_index`, `read_hdf_by_value`).
12 - Interpolate to slices along a given axis (`np_interpolate_slice_from_hdf`) or
13 generic positions (`interpolate_positions_from_hdf`).
14
152025/06: CD: Prep for integration into psi-io package, HDF4 is now optional.
16
172026/01: RD: Refactor legacy routines to use new generic routines where possible.
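
A minimal usage sketch (file names below are placeholders)::

    import psi_io
    x, y, z, f = psi_io.rdhdf_3d('rho_cube.h5')   # read a 3D PSI-style file
    psi_io.wrhdf_3d('rho_copy.h5', x, y, z, f)    # write it back out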
18"""
19
20from __future__ import annotations
21
22__all__ = [
23 "read_hdf_meta",
24 "read_rtp_meta",
25
26 "get_scales_1d",
27 "get_scales_2d",
28 "get_scales_3d",
29
30 "read_hdf_by_index",
31 "read_hdf_by_value",
32 "read_hdf_by_ivalue",
33
34 "np_interpolate_slice_from_hdf",
35 "sp_interpolate_slice_from_hdf",
36 "interpolate_positions_from_hdf",
37
38 "instantiate_linear_interpolator",
39 "interpolate_point_from_1d_slice",
40 "interpolate_point_from_2d_slice",
41
42 "read_hdf_data",
43 "rdhdf_1d",
44 "rdhdf_2d",
45 "rdhdf_3d",
46
47 "write_hdf_data",
48 "wrhdf_1d",
49 "wrhdf_2d",
50 "wrhdf_3d",
51]
52
53import math
54from collections import namedtuple
55from pathlib import Path
56from types import MappingProxyType
57from typing import Optional, Literal, Tuple, Iterable, List, Dict, Union, Callable, Any
58
59import numpy as np
60import h5py as h5
61
62# -----------------------------------------------------------------------------
63# Optional Imports and Import Checking
64# -----------------------------------------------------------------------------
65# These packages are needed by several functions and must be imported in the
66# module namespace.
67try:
68 import pyhdf.SD as h4
69 H4_AVAILABLE = True
70 NPTYPES_TO_SDCTYPES = MappingProxyType({
71 "int8": h4.SDC.INT8,
72 "uint8": h4.SDC.UINT8,
73 "int16": h4.SDC.INT16,
74 "uint16": h4.SDC.UINT16,
75 "int32": h4.SDC.INT32,
76 "uint32": h4.SDC.UINT32,
77 "float16": h4.SDC.FLOAT32,
78 "float32": h4.SDC.FLOAT32,
79 "float64": h4.SDC.FLOAT64,
80 })
81except ImportError:
82 H4_AVAILABLE = False
83 NPTYPES_TO_SDCTYPES = {}
84
85try:
86 from scipy.interpolate import RegularGridInterpolator
87 SCIPY_AVAILABLE = True
88except ImportError:
89 SCIPY_AVAILABLE = False
90
91
92# Functions to stop execution if a package doesn't exist.
93def _except_no_pyhdf():
94 if not H4_AVAILABLE:
95 raise ImportError('The pyhdf package is required to read/write HDF4 .hdf files!')
96 return
97
98
99def _except_no_scipy():
100 if not SCIPY_AVAILABLE:
101 raise ImportError('The scipy package is required for the interpolation routines!')
102 return
103
104
105SDC_TYPE_CONVERSIONS = MappingProxyType({
106 3: np.dtype("ubyte"),
107 4: np.dtype("byte"),
108 5: np.dtype("float32"),
109 6: np.dtype("float64"),
110 20: np.dtype("int8"),
111 21: np.dtype("uint8"),
112 22: np.dtype("int16"),
113 23: np.dtype("uint16"),
114 24: np.dtype("int32"),
115 25: np.dtype("uint32")
116})
117"""Helper dictionary for mapping HDF4 types to numpy dtypes"""
118
119
120PSI_DATA_ID = MappingProxyType({
121 'h4': 'Data-Set-2',
122 'h5': 'Data'
123})
124"""Mapping of PSI standard dataset names for HDF4 and HDF5 files"""
125
126
127PSI_SCALE_ID = MappingProxyType({
128 'h4': ('fakeDim0', 'fakeDim1', 'fakeDim2'),
129 'h5': ('dim1', 'dim2', 'dim3')
130})
131"""Mapping of PSI standard scale names for HDF4 and HDF5 files"""
132
133
134HDFEXT = {'.hdf', '.h5'}
135"""Set of possible HDF file extensions"""
136
137
138HdfExtType = Literal['.hdf', '.h5']
139"""Type alias for possible HDF file extensions"""
140
141
142HdfScaleMeta = namedtuple('HdfScaleMeta', ['name', 'type', 'shape', 'imin', 'imax'])
143"""
144 Named tuple for HDF scale metadata
145
146 Parameters
147 ----------
148 name : str
149 The name of the scale.
150 type : str
151 The data type of the scale.
152 shape : Tuple[int, ...]
153 The shape of the scale.
154 imin : float
155 The minimum value of the scale.
156 This assumes the scale is monotonically increasing.
157 imax : float
158 The maximum value of the scale.
159 This assumes the scale is monotonically increasing.
160"""
161
162
163HdfDataMeta = namedtuple('HdfDataMeta', ['name', 'type', 'shape', 'scales'])
164"""
165 Named tuple for HDF dataset metadata
166
167 Parameters
168 ----------
169 name : str
170 The name of the dataset.
171 type : str
172 The data type of the dataset.
173 shape : tuple of int
174 The shape of the dataset.
175 scales : list of HdfScaleMeta
176 A list of scale metadata objects corresponding to each dimension of the dataset.
177 If the dataset has no scales, this list will be empty.
178"""
179
180
181def _dispatch_by_ext(ifile: Union[Path, str],
182 hdf4_func: Callable,
183 hdf5_func: Callable,
184 *args: Any, **kwargs: Any
185 ):
186 """
187 Dispatch function to call HDF4 or HDF5 specific functions based on file extension.
188
189 Parameters
190 ----------
191 ifile : Path | str
192 The path to the HDF file.
193 hdf4_func : Callable
194 The function to call for HDF4 files.
195 hdf5_func : Callable
196 The function to call for HDF5 files.
197 *args : Any
198 Positional arguments to pass to the selected function.
    **kwargs : Any
        Keyword arguments to pass to the selected function.
199
200 Raises
201 ------
202 ValueError
203 If the file does not have a `.hdf` or `.h5` extension.
204 ImportError
205 If the file is HDF4 and the `pyhdf` package is not available
206 """
207 ipath = Path(ifile)
208 if ipath.suffix == '.h5':
209 return hdf5_func(ifile, *args, **kwargs)
210 if ipath.suffix == '.hdf':
211 _except_no_pyhdf()
212 return hdf4_func(ifile, *args, **kwargs)
213 raise ValueError("File must be HDF4 (.hdf) or HDF5 (.h5)")
214
215
216# -----------------------------------------------------------------------------
217# "Classic" HDF reading and writing routines adapted from psihdf.py or psi_io.py.
218# -----------------------------------------------------------------------------
219
220
221def rdhdf_1d(hdf_filename: str
222 ) -> Tuple[np.ndarray, np.ndarray]:
223 """Read a 1D PSI-style HDF5 or HDF4 file.
224
225 Parameters
226 ----------
227 hdf_filename : str
228 The path to the 1D HDF5 (.h5) or HDF4 (.hdf) file to read.
229
230 Returns
231 -------
232 x : np.ndarray
233 1D array of scales.
234 f : np.ndarray
235 1D array of data.
236 """
237 return _rdhdf_nd(hdf_filename, dimensionality=1)
238
239
240def rdhdf_2d(hdf_filename: str
241 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
242 """Read a 2D PSI-style HDF5 or HDF4 file.
243
244 The data in the HDF file is assumed to be ordered X,Y in Fortran order.
245
246 Each dimension is assumed to have a 1D "scale" associated with it that
247 describes the rectilinear grid coordinates in each dimension.
248
249 Parameters
250 ----------
251 hdf_filename : str
252 The path to the 2D HDF5 (.h5) or HDF4 (.hdf) file to read.
253
254 Returns
255 -------
256 x : np.ndarray
257 1D array of scales in the X dimension.
258 y : np.ndarray
259 1D array of scales in the Y dimension.
260 f : np.ndarray
261 2D array of data, C-ordered as shape(ny,nx) for Python (see note 1).
262 """
263 return _rdhdf_nd(hdf_filename, dimensionality=2)
264
265
266def rdhdf_3d(hdf_filename: str
267 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
268 """Read a 3D PSI-style HDF5 or HDF4 file.
269
270 The data in the HDF file is assumed to be ordered X,Y,Z in Fortran order.
271
272 Each dimension is assumed to have a 1D "scale" associated with it that
273 describes the rectilinear grid coordinates in each dimension.
274
275 Parameters
276 ----------
277 hdf_filename : str
278 The path to the 3D HDF5 (.h5) or HDF4 (.hdf) file to read.
279
280 Returns
281 -------
282 x : np.ndarray
283 1D array of scales in the X dimension.
284 y : np.ndarray
285 1D array of scales in the Y dimension.
286 z : np.ndarray
287 1D array of scales in the Z dimension.
288 f : np.ndarray
289 3D array of data, C-ordered as shape(nz,ny,nx) for Python (see note 1).
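
    Examples
    --------
    A minimal usage sketch; the file name is a placeholder:

    >>> x, y, z, f = rdhdf_3d('rho_cube.h5')   # doctest: +SKIP
    >>> # f is C-ordered, so f.shape == (z.size, y.size, x.size)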
290 """
291 return _rdhdf_nd(hdf_filename, dimensionality=3)
292
293
294def wrhdf_1d(hdf_filename: str,
295 x: np.ndarray,
296 f: np.ndarray) -> None:
297 """Write a 1D PSI-style HDF5 or HDF4 file.
298
299 Parameters
300 ----------
301 hdf_filename : str
302 The path to the 1D HDF5 (.h5) or HDF4 (.hdf) file to write.
303 x : np.ndarray
304 1D array of scales.
305 f : np.ndarray
306 1D array of data.
307 """
308 x = np.asarray(x)
309 y = np.array([])
310 z = np.array([])
311 f = np.asarray(f)
312 _wrhdf(hdf_filename, x, y, z, f)
313
314
315def wrhdf_2d(hdf_filename: str,
316 x: np.ndarray,
317 y: np.ndarray,
318 f: np.ndarray) -> None:
319 """Write a 2D PSI-style HDF5 or HDF4 file.
320
321 The data in the HDF file will appear as X,Y in Fortran order.
322
323 Each dimension requires a 1D "scale" associated with it that
324 describes the rectilinear grid coordinates in each dimension.
325
326 Parameters
327 ----------
328 hdf_filename : str
329 The path to the 2D HDF5 (.h5) or HDF4 (.hdf) file to write.
330 x : np.ndarray
331 1D array of scales in the X dimension.
332 y : np.ndarray
333 1D array of scales in the Y dimension.
334 f : np.ndarray
335 2D array of data, C-ordered as shape(ny,nx) for Python (see note 1).
336 """
337 x = np.asarray(x)
338 y = np.asarray(y)
339 z = np.array([])
340 f = np.asarray(f)
341 if hdf_filename.endswith('h5'):
342 _wrhdf(hdf_filename, x, y, z, f)
343 else:
344 _wrhdf(hdf_filename, y, x, z, f)
345
346
347def wrhdf_3d(hdf_filename: str,
348 x: np.ndarray,
349 y: np.ndarray,
350 z: np.ndarray,
351 f: np.ndarray) -> None:
352 """Write a 3D PSI-style HDF5 or HDF4 file.
353
354 The data in the HDF file will appear as X,Y,Z in Fortran order.
355
356 Each dimension requires a 1D "scale" associated with it that
357 describes the rectilinear grid coordinates in each dimension.
358
359 Parameters
360 ----------
361 hdf_filename : str
362 The path to the 3D HDF5 (.h5) or HDF4 (.hdf) file to write.
363 x : np.ndarray
364 1D array of scales in the X dimension.
365 y : np.ndarray
366 1D array of scales in the Y dimension.
367 z : np.ndarray
368 1D array of scales in the Z dimension.
369 f : np.ndarray
370 3D array of data, C-ordered as shape(nz,ny,nx) for Python (see note 1).
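
    Examples
    --------
    A minimal usage sketch with synthetic scales and data; the file name is a placeholder:

    >>> import numpy as np
    >>> x, y, z = np.linspace(0, 1, 4), np.linspace(0, 1, 3), np.linspace(0, 1, 2)
    >>> f = np.zeros((z.size, y.size, x.size))   # C-ordered as shape (nz, ny, nx)
    >>> wrhdf_3d('example_3d.h5', x, y, z, f)   # doctest: +SKIP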
371 """
372 x = np.asarray(x)
373 y = np.asarray(y)
374 z = np.asarray(z)
375 f = np.asarray(f)
376 if hdf_filename.endswith('h5'):
377 _wrhdf(hdf_filename, x, y, z, f)
378 else:
379 _wrhdf(hdf_filename, z, y, x, f)
380
381
382def get_scales_1d(filename: str
383 ) -> np.ndarray:
384 """Wrapper to return the scales of a 1D PSI style HDF5 or HDF4 dataset.
385
386 This routine does not load the data array so it can be quite fast for large files.
387
388 Parameters
389 ----------
390 filename : str
391 The path to the 1D HDF5 (.h5) or HDF4 (.hdf) file to read.
392
393 Returns
394 -------
395 x : np.ndarray
396 1D array of scales in the X dimension.
397 """
398 return _dispatch_by_ext(filename, _get_scales_nd_h4, _get_scales_nd_h5,
399 dimensionality=1)
400
401
402def get_scales_2d(filename: str
403 ) -> Tuple[np.ndarray, np.ndarray]:
404 """Wrapper to return the scales of a 2D PSI style HDF5 or HDF4 dataset.
405
406 This routine does not load the data array so it can be quite fast for large files.
407
408 The data in the HDF file is assumed to be ordered X,Y in Fortran order.
409
410 Parameters
411 ----------
412 filename : str
413 The path to the 2D HDF5 (.h5) or HDF4 (.hdf) file to read.
414
415 Returns
416 -------
417 x : np.ndarray
418 1D array of scales in the X dimension.
419 y : np.ndarray
420 1D array of scales in the Y dimension.
421 """
422 return _dispatch_by_ext(filename, _get_scales_nd_h4, _get_scales_nd_h5,
423 dimensionality=2)
424
425
426def get_scales_3d(filename: str
427 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
428 """Wrapper to return the scales of a 3D PSI style HDF5 or HDF4 dataset.
429
430 This routine does not load the data array so it can be quite fast for large files.
431
432 The data in the HDF file is assumed to be ordered X,Y,Z in Fortran order.
433
434 Parameters
435 ----------
436 filename : str
437 The path to the 3D HDF5 (.h5) or HDF4 (.hdf) file to read.
438
439 Returns
440 -------
441 x : np.ndarray
442 1D array of scales in the X dimension.
443 y : np.ndarray
444 1D array of scales in the Y dimension.
445 z : np.ndarray
446 1D array of scales in the Z dimension.
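
    Examples
    --------
    A minimal usage sketch using the packaged sample cube referenced by the other readers:

    >>> from psi_io.data import get_3d_data
    >>> from psi_io import get_scales_3d
    >>> x, y, z = get_scales_3d(get_3d_data())
    >>> # Only the three 1D scale arrays are read; the data block itself is not loaded.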
447 """
448 return _dispatch_by_ext(filename, _get_scales_nd_h4, _get_scales_nd_h5,
449 dimensionality=3)
450
451
452# -----------------------------------------------------------------------------
453# "Updated" HDF reading and slicing routines for Hdf4 and Hdf5 datasets.
454# -----------------------------------------------------------------------------
455
456
539def read_hdf_data(ifile: Union[Path, str], /,
540 dataset_id: Optional[str] = None,
541 return_scales: bool = True,
542 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
543 """
544 Read data from an HDF4 (.hdf) or HDF5 (.h5) file.
545
546 Parameters
547 ----------
548 ifile : Path | str
549 The path to the HDF file to read.
550 dataset_id : str | None
551 The identifier of the dataset to read.
552 If None, a default dataset is used ('Data-Set-2' for HDF4 and 'Data' for HDF5).
553 return_scales : bool
554 If True, the scales (coordinate arrays) for each dimension are also returned.
555
556 Returns
557 -------
558 out : np.ndarray | tuple[np.ndarray]
559 The data array.
560 If ``return_scales`` is True, returns a tuple containing the data array
561 and the scales for each dimension.
562
563 Raises
564 ------
565 ValueError
566 If the file does not have a `.hdf` or `.h5` extension.
567
568 See Also
569 --------
570 read_hdf_by_index
571 Read HDF datasets by index.
572 read_hdf_by_value
573 Read HDF datasets by value ranges.
574 read_hdf_by_ivalue
575 Read HDF datasets by subindex values.
576
577 Notes
578 -----
579 This function delegates to :func:`_read_h5_data` for HDF5 files
580 and :func:`_read_h4_data` for HDF4 files based on the file extension.
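
    Examples
    --------
    A minimal usage sketch using the packaged sample cube referenced by the other readers:

    >>> from psi_io.data import get_3d_data
    >>> from psi_io import read_hdf_data
    >>> f, r, t, p = read_hdf_data(get_3d_data())
    >>> f.ndim
    3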
581 """
582 return _dispatch_by_ext(ifile, _read_h4_data, _read_h5_data,
583 dataset_id=dataset_id, return_scales=return_scales)
584
585
586def read_hdf_by_index(ifile: Union[Path, str], /,
587 *xi: Union[int, Tuple[Union[int, None], Union[int, None]], None],
588 dataset_id: Optional[str] = None,
589 return_scales: bool = True,
590 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
591 """
592 Read data from an HDF4 (.hdf) or HDF5 (.h5) file by index.
593
594 .. attention::
595 For each dimension, the *minimum* number of elements returned is 1 *e.g.*
596 if 3 ints are passed (as positional `*xi` arguments) for a 3D dataset,
597 the resulting subset will have a shape of (1, 1, 1,) with scales of length 1.
598
599 Parameters
600 ----------
601 ifile : Path | str
602 The path to the HDF file to read.
603 *xi : int | tuple[int | None, int | None] | None
604 Indices or ranges for each dimension of the `n`-dimensional dataset.
605 Use None for a dimension to select all indices. If no arguments are passed,
606 the entire dataset (and its scales) will be returned – see
607 :func:`~psi_io.psi_io.read_hdf_data`.
608 dataset_id : str | None
609 The identifier of the dataset to read.
610 If None, a default dataset is used ('Data-Set-2' for HDF4 and 'Data' for HDF5).
611 return_scales : bool
612 If True, the scales (coordinate arrays) for each dimension are also returned.
613
614 Returns
615 -------
616 out : np.ndarray | tuple[np.ndarray]
617 The selected data array.
618 If ``return_scales`` is True, returns a tuple containing the data array
619 and the scales for each dimension.
620
621 Raises
622 ------
623 ValueError
624 If the file does not have a `.hdf` or `.h5` extension.
625
626 See Also
627 --------
628 read_hdf_by_value
629 Read HDF datasets by value ranges.
630 read_hdf_by_ivalue
631 Read HDF datasets by subindex values.
632 read_hdf_data
633 Read entire HDF datasets.
634
635 Notes
636 -----
637 This function delegates to :func:`_read_h5_by_index` for HDF5 files and
638 :func:`_read_h4_by_index` for HDF4 files based on the file extension.
639
640 This function assumes that the dataset is Fortran (or column-major) ordered *viz.* for
641 compatibility with PSI's data ecosystem; as such, a given :math:`n`-dimensional array,
642 of shape :math:`(D_0, D_1, ..., D_{n-1})`, has scales :math:`(x_0, x_1, ..., x_{n-1})`,
643 such that :math:`| x_i | = | D_{(n-1)-i} |`. For example, a 3D dataset with shape
644 :math:`(D_p, D_t, D_r)` has scales :math:`r, t, p` corresponding to the radial, theta,
645 and phi dimensions respectively.
646
647 This function extracts a subset of the given dataset/scales without reading the
648 entire data into memory. For a given scale :math:`x_j`, an index, index range, or ``None``
649 can be provided; these inputs are forwarded through to Python's builtin :class:`slice` to
650 extract the desired subset of the scale(s) / dataset.
651
652 Examples
653 --------
654 Import a 3D HDF5 cube.
655
656 >>> from psi_io.data import get_3d_data
657 >>> from psi_io import read_hdf_by_index
658 >>> filepath = get_3d_data()
659
660 Extract a radial slice (at the first radial-scale index) from a 3D cube:
661
662 >>> f, r, t, p = read_hdf_by_index(filepath, 0, None, None)
663 >>> f.shape, r.shape, t.shape, p.shape
664 ((181, 100, 1), (1,), (100,), (181,))
665
666 Extract a phi slice at the 90th index from a 3D cube:
667
668 >>> f, r, t, p = read_hdf_by_index(filepath, None, None, 90)
669 >>> f.shape, r.shape, t.shape, p.shape
670 ((1, 100, 151), (151,), (100,), (1,))
671
672 Extract the values up to the 20th index (in the radial dimension) and with
673 phi indices from 10 to 25:
674
675 >>> f, r, t, p = read_hdf_by_index(filepath, (None, 20), None, (10, 25))
676 >>> f.shape, r.shape, t.shape, p.shape
677 ((15, 100, 20), (20,), (100,), (15,))
678 """
679 if not xi:
680 return read_hdf_data(ifile, dataset_id=dataset_id, return_scales=return_scales)
681 return _dispatch_by_ext(ifile, _read_h4_by_index, _read_h5_by_index,
682 *xi, dataset_id=dataset_id, return_scales=return_scales)
683
684
685def read_hdf_by_value(ifile: Union[Path, str], /,
686 *xi: Union[float, Tuple[float, float], None],
687 dataset_id: Optional[str] = None,
688 return_scales: bool = True,
689 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
690 """
691 Read data from an HDF4 (.hdf) or HDF5 (.h5) file by value.
692
693 .. note::
694 For each dimension, the minimum number of elements returned is 2 *e.g.*
695 if 3 floats are passed (as positional `*xi` arguments) for a 3D dataset,
696 the resulting subset will have a shape of (2, 2, 2,) with scales of length 2.
697
698 Parameters
699 ----------
700 ifile : Path | str
701 The path to the HDF file to read.
702 *xi : float | tuple[float, float] | None
703 Values or value ranges corresponding to each dimension of the `n`-dimensional
704 dataset specified by the ``dataset_id``. If no arguments are passed, the
705 entire dataset (and its scales) will be returned.
706 dataset_id : str | None
707 The identifier of the dataset to read.
708 If None, a default dataset is used ('Data-Set-2' for HDF4 and 'Data' for HDF5).
709 return_scales : bool
710 If True, the scales for the specified dataset are also returned.
711
712 Returns
713 -------
714 out : np.ndarray | tuple[np.ndarray]
715 The selected data array.
716 If ``return_scales`` is True, returns a tuple containing the data array
717 and the scales for each dimension.
718
719 Raises
720 ------
721 ValueError
722 If the file does not have a `.hdf` or `.h5` extension.
723
724 See Also
725 --------
726 read_hdf_by_index
727 Read HDF datasets by index.
728 read_hdf_by_ivalue
729 Read HDF datasets by subindex values.
730 read_hdf_data
731 Read entire HDF datasets.
732 sp_interpolate_slice_from_hdf
733 Interpolate slices from HDF datasets using SciPy's
734 :class:`~scipy.interpolate.RegularGridInterpolator`
735 np_interpolate_slice_from_hdf
736 Perform linear, bilinear, or trilinear interpolation using vectorized numpy-based
737 routines.
738
739 Notes
740 -----
741 This function delegates to :func:`_read_h5_by_value` for HDF5 files and
742 :func:`_read_h4_by_value` for HDF4 files based on the file extension.
743
744 This function assumes that the dataset is Fortran (or column-major) ordered *viz.* for
745 compatibility with PSI's data ecosystem; as such, a given :math:`n`-dimensional array,
746 of shape :math:`(D_0, D_1, ..., D_{n-1})`, has scales :math:`(x_0, x_1, ..., x_{n-1})`,
747 such that :math:`| x_i | = | D_{(n-1)-i} |`. For example, a 3D dataset with shape
748 :math:`(D_p, D_t, D_r)` has scales :math:`r, t, p` corresponding to the radial, theta,
749 and phi dimensions respectively.
750
751 This function extracts a subset of the given dataset/scales without reading the
752 entire data into memory. For a given scale :math:`x_j`, if:
753
754 - *i)* a single float is provided (:math:`a`), the function will return a 2-element
755 subset of the scale (:math:`xʹ_j`) such that :math:`xʹ_j[0] <= a < xʹ_j[1]`.
756 - *ii)* a (float, float) tuple is provided (:math:`a_0, a_1`), the function will return an
757 *m*-element subset of the scale (:math:`xʹ_j`) where
758 :math:`xʹ_j[0] <= a_0` and :math:`xʹ_j[m-1] > a_1`.
759 - *iii)* a **None** value is provided, the function will return the entire scale :math:`x_j`
760
761 The returned subset can then be passed to a linear interpolation routine to extract the
762 "slice" at the desired fixed dimensions.
763
764 Examples
765 --------
766 Import a 3D HDF5 cube.
767
768 >>> from psi_io.data import get_3d_data
769 >>> from psi_io import read_hdf_by_value
770 >>> filepath = get_3d_data()
771
772 Extract a radial slice at r=15 from a 3D cube:
773
774 >>> f, r, t, p = read_hdf_by_value(filepath, 15, None, None)
775 >>> f.shape, r.shape, t.shape, p.shape
776 ((181, 100, 2), (2,), (100,), (181,))
777
778 Extract a phi slice at p=1.57 from a 3D cube:
779
780 >>> f, r, t, p = read_hdf_by_value(filepath, None, None, 1.57)
781 >>> f.shape, r.shape, t.shape, p.shape
782 ((2, 100, 151), (151,), (100,), (2,))
783
784 Extract the values between 3.2 and 6.4 (in the radial dimension) and with
785 phi equal to 4.5
786
787 >>> f, r, t, p = read_hdf_by_value(filepath, (3.2, 6.4), None, 4.5)
788 >>> f.shape, r.shape, t.shape, p.shape
789 ((2, 100, 15), (15,), (100,), (2,))
790 """
791 if not xi:
792 return read_hdf_data(ifile, dataset_id=dataset_id, return_scales=return_scales)
793 return _dispatch_by_ext(ifile, _read_h4_by_value, _read_h5_by_value,
794 *xi, dataset_id=dataset_id, return_scales=return_scales)
795
796
797def read_hdf_by_ivalue(ifile: Union[Path, str], /,
798 *xi: Union[float, Tuple[float, float], None],
799 dataset_id: Optional[str] = None,
800 return_scales: bool = True,
801 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
802 """
803 Read data from an HDF4 (.hdf) or HDF5 (.h5) file by (fractional) index value.
804
805 .. note::
806 For each dimension, the minimum number of elements returned is 2 *e.g.*
807 if 3 floats are passed (as positional `*xi` arguments) for a 3D dataset,
808 the resulting subset will have a shape of (2, 2, 2,) with scales of length 2.
809
810 Parameters
811 ----------
812 ifile : Path | str
813 The path to the HDF file to read.
814 *xi : float | tuple[float, float] | None
815 Values or value ranges corresponding to each dimension of the `n`-dimensional
816 dataset specified by the ``dataset_id``. If no arguments are passed, the
817 entire dataset (and its scales) will be returned.
818 dataset_id : str | None
819 The identifier of the dataset to read.
820 If None, a default dataset is used ('Data-Set-2' for HDF4 and 'Data' for HDF5).
821 return_scales : bool
822 If True, arrays of indices for the specified dataset are also returned.
823 Note, regardless of whether the provided dataset has coordinate variables
824 (scales), the returned index arrays are always 0-based indices generated
825 through :func:`~numpy.arange`.
826
827 Returns
828 -------
829 out : np.ndarray | tuple[np.ndarray]
830 The selected data array.
831 If ``return_scales`` is True, returns a tuple containing the data array
832 and the scales for each dimension.
833
834 Raises
835 ------
836 ValueError
837 If the file does not have a `.hdf` or `.h5` extension.
838
839 See Also
840 --------
841 read_hdf_by_index
842 Read HDF datasets by index.
843 read_hdf_data
844 Read entire HDF datasets.
845 sp_interpolate_slice_from_hdf
846 Interpolate slices from HDF datasets using SciPy's
847 :class:`~scipy.interpolate.RegularGridInterpolator`
848 np_interpolate_slice_from_hdf
849 Perform linear, bilinear, or trilinear interpolation using vectorized numpy-based
850 routines.
851
852 Notes
853 -----
854 This function delegates to :func:`_read_h5_by_ivalue` for HDF5 files and
855 :func:`_read_h4_by_ivalue` for HDF4 files based on the file extension.
856
857 This function assumes that the dataset is Fortran (or column-major) ordered *viz.* for
858 compatibility with PSI's data ecosystem; as such, a given :math:`n`-dimensional array,
859 of shape :math:`(D_0, D_1, ..., D_{n-1})`, has scales :math:`(x_0, x_1, ..., x_{n-1})`,
860 such that :math:`| x_i | = | D_{(n-1)-i} |`. For example, a 3D dataset with shape
861 :math:`(D_p, D_t, D_r)` has scales :math:`r, t, p` corresponding to the radial, theta,
862 and phi dimensions respectively.
863
864 This function extracts a subset of the given dataset/scales without reading the
865 entire data into memory. For a given scale :math:`x_j`, if:
866
867 - *i)* a single float is provided (:math:`a`), the function will return a 2-element
868 subset of the scale (:math:`xʹ_j`): :math:`xʹ_j[floor(a)], xʹ_j[ceil(a)]`.
869 - *ii)* a (float, float) tuple is provided (:math:`a_0, a_1`), the function will return an
870 *m*-element subset of the scale (:math:`xʹ_j`): :math:`xʹ_j[floor(a_0)], xʹ_j[ceil(a_1)]`
871 - *iii)* a **None** value is provided, the function will return the entire scale :math:`x_j`
872
873 The returned subset can then be passed to a linear interpolation routine to extract the
874 "slice" at the desired fixed dimensions.
875
878 """
879 if not xi:
880 return read_hdf_data(ifile, dataset_id=dataset_id, return_scales=return_scales)
881 return _dispatch_by_ext(ifile, _read_h4_by_ivalue, _read_h5_by_ivalue,
882 *xi, dataset_id=dataset_id, return_scales=return_scales)
883
884
885def write_hdf_data(ifile: Union[Path, str], /,
886 data: np.ndarray,
887 *scales: Iterable[np.ndarray],
888 dataset_id: Optional[str] = None
889 ) -> Path:
890 """
891 Write data to an HDF4 (.hdf) or HDF5 (.h5) file.
892
893 Following PSI conventions, the data array is assumed to be Fortran-ordered,
894 with the scales provided in the order corresponding to each dimension *e.g.* a
895 3D dataset with shape (Dp, Dt, Dr) has scales r, t, p corresponding to the
896 radial, theta, and phi dimensions respectively.
897
898 Parameters
899 ----------
900 ifile : Path | str
901 The path to the HDF file to write.
902 data : np.ndarray
903 The data array to write.
904 *scales : Iterable[np.ndarray]
905 The scales (coordinate arrays) for each dimension.
906 dataset_id : str | None
907 The identifier of the dataset to write.
908 If None, a default dataset is used ('Data-Set-2' for HDF
909 and 'Data' for HDF5).
910
911 Returns
912 -------
913 out : Path
914 The path to the written HDF file.
915
916 Raises
917 ------
918 ValueError
919 If the file does not have a `.hdf` or `.h5` extension.
920
921 Notes
922 -----
923 This function delegates to :func:`_write_h5_data` for HDF5 files
924 and :func:`_write_h4_data` for HDF4 files based on the file extension.
925
926 See Also
927 --------
928 wrhdf_1d
929 Write 1D HDF files.
930 wrhdf_2d
931 Write 2D HDF files.
932 wrhdf_3d
933 Write 3D HDF files.
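
    Examples
    --------
    A minimal usage sketch with synthetic scales and data; the file name is a placeholder:

    >>> import numpy as np
    >>> r = np.linspace(1.0, 2.0, 4)
    >>> t = np.linspace(0.0, np.pi, 3)
    >>> p = np.linspace(0.0, 2*np.pi, 5)
    >>> f = np.zeros((p.size, t.size, r.size))   # Fortran-style (Dp, Dt, Dr) layout
    >>> write_hdf_data('example.h5', f, r, t, p)   # doctest: +SKIP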
934 """
935 return _dispatch_by_ext(ifile, _write_h4_data, _write_h5_data, data,
936 *scales, dataset_id=dataset_id)
937
938
939def instantiate_linear_interpolator(*args, **kwargs):
940 """
941 Instantiate a linear interpolator using the provided data and scales.
942
943 Parameters
944 ----------
945 *args : sequence[array_like]
946 The first argument is the data array.
947 Subsequent arguments are the scales (coordinate arrays) for each dimension.
948 **kwargs : dict
949 Additional keyword arguments to pass to
950 :class:`~scipy.interpolate.RegularGridInterpolator`.
951
952 Returns
953 -------
954 out : RegularGridInterpolator
955 An instance of RegularGridInterpolator initialized
956 with the provided data and scales.
957
958 Notes
959 -----
960 This function transposes the data array and passes it along with the scales
961 to RegularGridInterpolator. Given a PSI-style Fortran-ordered 3D dataset,
962 the resulting interpolator can be queried using (r, theta, phi) coordinates.
963
964 Examples
965 --------
966 Import a 3D HDF5 cube.
967
968 >>> from psi_io.data import get_3d_data
969 >>> from psi_io import read_hdf_by_value, instantiate_linear_interpolator
970 >>> from numpy import pi
971 >>> filepath = get_3d_data()
972
973 Read the dataset by value (at 15 R_sun in the radial dimension).
974
975 >>> data_and_scales = read_hdf_by_value(filepath, 15, None, None)
976 >>> interpolator = instantiate_linear_interpolator(*data_and_scales)
977
978 Interpolate at a specific position.
979
980 >>> interpolator((15, pi/2, pi))
981 0.0012864485109423877
982 """
983 _except_no_scipy()
984 return RegularGridInterpolator(
985 values=args[0].T,
986 points=args[1:],
987 **kwargs)
988
989
990def sp_interpolate_slice_from_hdf(*xi, **kwargs):
991 """
992 Interpolate a slice from HDF data using SciPy's `RegularGridInterpolator`.
993
994 .. note::
995 Slicing routines result in a dimensional reduction. The dimensions
996 that are fixed (i.e. provided as float values in `*xi`) are removed
997 from the output slice, while the dimensions that are not fixed
998 (*i.e.* provided as None in `*xi`) are retained.
999
1000 Parameters
1001 ----------
1002 *xi : sequence
1003 Positional arguments passed-through to :func:`read_hdf_by_value`.
1004 **kwargs : dict
1005 Keyword arguments passed-through to :func:`read_hdf_by_value`.
1006 **NOTE: Instantiating a linear interpolator requires the** ``return_scales``
1007 **keyword argument to be set to True; this function overrides
1008 any provided value for** ``return_scales`` **to ensure this behavior.**
1009
1010 Returns
1011 -------
1012 slice : np.ndarray
1013 The interpolated data slice.
1014 scales : list
1015 A list of scales for the dimensions that were not fixed.
1016
1017 Notes
1018 -----
1019 This function reads data from an HDF file, creates a linear interpolator,
1020 and interpolates a slice based on the provided values.
1021
1022 .. note::
1023 The returned slice is Fortran-ordered *e.g.* radial slices will have shape
1024 (phi, theta), phi slices will have shape (r, theta), etc.
1025
1026 .. note::
1027 SciPy's `RegularGridInterpolator` casts all input data to `float64` internally.
1028 Therefore, PSI HDF datasets with single-precision (`float32`) data will be upcast
1029 during interpolation.
1030
1031 Examples
1032 --------
1033 >>> from psi_io.data import get_3d_data
1034 >>> from psi_io import sp_interpolate_slice_from_hdf
1035 >>> from numpy import pi
1036 >>> filepath = get_3d_data()
1037
1038 Fetch a 2D slice at r=15 from 3D map
1039
1040 >>> slice_, theta_scale, phi_scale = sp_interpolate_slice_from_hdf(filepath, 15, None, None)
1041 >>> slice_.shape, theta_scale.shape, phi_scale.shape
1042 ((181, 100), (100,), (181,))
1043
1044 Fetch a single point from 3D map
1045
1046 >>> point_value, *_ = sp_interpolate_slice_from_hdf(filepath, 1, pi/2, pi)
1047 >>> point_value
1048 6.084495480971823
1049 """
1050 filepath, *args = xi
1051 kwargs.pop('return_scales', None)
1052 result = read_hdf_by_value(filepath, *args, **kwargs)
1053 interpolator = instantiate_linear_interpolator(*result)
1054 grid = [yi[0] if yi[0] is not None else yi[1] for yi in zip(args, result[1:])]
1055 slice_ = interpolator(tuple(np.meshgrid(*grid, indexing='ij')))
1056 indices = [0 if yi is not None else slice(None, None) for yi in args]
1057 return slice_[tuple(indices)].T, *[yi[1] for yi in zip(args, result[1:]) if yi[0] is None]
1058
1059
1060def np_interpolate_slice_from_hdf(ifile: Union[Path, str], /,
1061 *xi: Union[float, Tuple[float, float], None],
1062 dataset_id: Optional[str] = None,
1063 by_index: bool = False,
1064 ):
1065 """
1066 Interpolate a slice from HDF data using linear interpolation.
1067
1068 .. note::
1069 Slicing routines result in a dimensional reduction. The dimensions
1070 that are fixed (i.e. provided as float values in `*xi`) are removed
1071 from the output slice, while the dimensions that are not fixed
1072 (*i.e.* provided as `None` in `*xi`) are retained.
1073
1074 Parameters
1075 ----------
1076 ifile : Path | str
1077 The path to the HDF file to read.
1078 *xi : sequence
1079 Positional arguments passed-through to reader function.
1080 dataset_id : str | None
1081 The identifier of the dataset to read.
1082 If None, a default dataset is used ('Data-Set-2' for HDF
1083 and 'Data' for HDF5).
1084 by_index : bool
1085 If True, use :func:`read_hdf_by_ivalue` to read data by subindex values.
1086 If False, use :func:`read_hdf_by_value` to read data by value ranges.
1087
1088 Returns
1089 -------
1090 slice : np.ndarray
1091 The interpolated data slice.
1092 scales : list
1093 A list of scales for the dimensions that were not fixed.
1094
1095 Raises
1096 ------
1097 ValueError
1098 If the number of dimensions to interpolate over is not supported.
1099
1100 Notes
1101 -----
1102 This function supports linear, bilinear, and trilinear interpolation
1103 depending on the number of dimensions fixed in `xi`.
1104
1105 Examples
1106 --------
1107 >>> from psi_io.data import get_3d_data
1108 >>> from psi_io import np_interpolate_slice_from_hdf
1109 >>> from numpy import pi
1110 >>> filepath = get_3d_data()
1111
1112 Fetch a 2D slice at r=15 from 3D map
1113
1114 >>> slice_, theta_scale, phi_scale = np_interpolate_slice_from_hdf(filepath, 15, None, None)
1115 >>> slice_.shape, theta_scale.shape, phi_scale.shape
1116 ((181, 100), (100,), (181,))
1117
1118 Fetch a single point from 3D map
1119
1120 >>> point_value, *_ = np_interpolate_slice_from_hdf(filepath, 1, pi/2, pi)
1121 >>> point_value
1122 6.084496
1123
1124 """
1125 reader = read_hdf_by_value if not by_index else read_hdf_by_ivalue
1126 data, *scales = reader(ifile, *xi, dataset_id=dataset_id, return_scales=True)
1127 f_ = np.transpose(data)
1128 slice_type = sum([yi is not None for yi in xi])
1129 if slice_type == 1:
1130 return _np_linear_interpolation(xi, scales, f_).T, *[yi[1] for yi in zip(xi, scales) if yi[0] is None]
1131 elif slice_type == 2:
1132 return _np_bilinear_interpolation(xi, scales, f_).T, *[yi[1] for yi in zip(xi, scales) if yi[0] is None]
1133 elif slice_type == 3:
1134 return _np_trilinear_interpolation(xi, scales, f_).T, *[yi[1] for yi in zip(xi, scales) if yi[0] is None]
1135 else:
1136 raise ValueError("Not a valid number of dimensions for supported linear interpolation methods")
1137
1138
1139def interpolate_positions_from_hdf(ifile, *xi, **kwargs):
1140 """
1141 Interpolate at a list of scale positions using SciPy's `RegularGridInterpolator`.
1142
1143 Parameters
1144 ----------
1145 ifile : Path | str
1146 The path to the HDF file to read.
1147 *xi : sequence[np.ndarray]
1148 Iterable scale values for each dimension of the `n`-dimensional dataset.
1149 Each array should have the same shape *i.e.* :math:`(m, )` – the function composes
1150 these arrays into an :math:`m \times n` column stack for interpolation.
1151 **kwargs : dict
1152 Keyword arguments to pass to :func:`read_hdf_by_value`.
1153
1154 Returns
1155 -------
1156 out : np.ndarray
1157 The interpolated values at the provided positions.
1158
1159 Notes
1160 -----
1161 This function reads data from an HDF file, creates a linear interpolator,
1162 and interpolates at the provided scale values. For each dimension, the
1163 minimum and maximum values from the provided arrays are used to read
1164 the necessary subset of data from the HDF file *viz.* to avoid loading
1165 the entire dataset into memory.
1166
1167 Examples
1168 --------
1169 Import a 3D HDF5 cube.
1170
1171 >>> from psi_io.data import get_3d_data
1172 >>> from psi_io import interpolate_positions_from_hdf
1173 >>> import numpy as np
1174 >>> filepath = get_3d_data()
1175
1176 Set up positions to interpolate.
1177
1178 >>> r_vals = np.array([15, 20, 25])
1179 >>> theta_vals = np.array([np.pi/4, np.pi/2, 3*np.pi/4])
1180 >>> phi_vals = np.array([0, np.pi, 2*np.pi])
1181
1182 Interpolate at the specified positions.
1183
1184 >>> interpolate_positions_from_hdf(filepath, r_vals, theta_vals, phi_vals)
1185 [0.0008402743657585175, 0.000723875405654482, -0.00041033233811179216]
1186 """
1187 xi_ = [(np.nanmin(i), np.nanmax(i)) for i in xi]
1188 f, *scales = read_hdf_by_value(ifile, *xi_, **kwargs)
1189 interpolator = instantiate_linear_interpolator(f, *scales, bounds_error=False)
1190 return interpolator(np.stack(xi, axis=len(xi[0].shape)))
1191
1192
1193def interpolate_point_from_1d_slice(xi, scalex, values):
1194 """
1195 Interpolate a point from a 1D slice using linear interpolation.
1196
1197 Parameters
1198 ----------
1199 xi : float
1200 The scale value at which to interpolate.
1201 scalex : np.ndarray
1202 The scale (coordinate array) for the dimension.
1203 values : np.ndarray
1204 The data array to interpolate.
1205
1206 Returns
1207 -------
1208 out : np.ndarray
1209 The interpolated data point.
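
    Examples
    --------
    A minimal sketch with a synthetic linear profile (so the expected value is exact):

    >>> import numpy as np
    >>> scale = np.array([0.0, 1.0, 2.0, 3.0])
    >>> vals = np.array([0.0, 10.0, 20.0, 30.0])
    >>> v = interpolate_point_from_1d_slice(1.5, scale, vals)   # -> 15.0 for this profile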
1210 """
1211 if scalex[0] > scalex[-1]:
1212 scalex, values = scalex[::-1], values[::-1]
1213 xi_ = int(np.searchsorted(scalex, xi))
1214 sx_ = slice(*_check_index_ranges(len(scalex), xi_, xi_))
1215 return _np_linear_interpolation([xi], [scalex[sx_]], values[sx_])
1216
1217
1218def interpolate_point_from_2d_slice(xi, yi, scalex, scaley, values):
1219 """
1220 Interpolate a point from a 2D slice using bilinear interpolation.
1221
1222 Parameters
1223 ----------
1224 xi : float
1225 The scale value for the first dimension.
1226 yi : float
1227 The scale value for the second dimension.
1228 scalex : np.ndarray
1229 The scale (coordinate array) for the first dimension.
1230 scaley : np.ndarray
1231 The scale (coordinate array) for the second dimension.
1232 values : np.ndarray
1233 The data array to interpolate.
1234
1235 Returns
1236 -------
1237 out : np.ndarray
1238 The interpolated data point.
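
    Examples
    --------
    A minimal sketch with a synthetic bilinear slice shaped ``(len(scaley), len(scalex))``,
    matching the slices returned by the slicing routines above:

    >>> import numpy as np
    >>> sx = np.array([0.0, 1.0, 2.0])
    >>> sy = np.array([0.0, 1.0, 2.0])
    >>> vals = sx[None, :] + 10.0*sy[:, None]    # shape (3, 3), affine in each direction
    >>> v = interpolate_point_from_2d_slice(0.5, 0.5, sx, sy, vals)   # -> 5.5 for this field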
1239 """
1240 values = np.transpose(values)
1241 if scalex[0] > scalex[-1]:
1242 scalex, values = scalex[::-1], values[::-1, :]
1243 if scaley[0] > scaley[-1]:
1244 scaley, values = scaley[::-1], values[:, ::-1]
1245 xi_, yi_ = int(np.searchsorted(scalex, xi)), int(np.searchsorted(scaley, yi))
1246 sx_, sy_ = slice(*_check_index_ranges(len(scalex), xi_, xi_)), slice(*_check_index_ranges(len(scaley), yi_, yi_))
1247 return _np_bilinear_interpolation([xi, yi], [scalex[sx_], scaley[sy_]], values[(sx_, sy_)])
1248
1249
1250def _rdhdf_nd(hdf_filename: str, dimensionality: int) -> Tuple[np.ndarray, ...]:
1251 f, *scales = read_hdf_data(hdf_filename)
1252 if f.ndim != dimensionality:
1253 err = f'Expected {dimensionality}D data, got {f.ndim}D data instead.'
1254 raise ValueError(err)
1255 scales = scales or (np.empty(0) for _ in f.shape)
1256 return *scales, f
1257
1258
1259def _wrh5(h5_filename: str,
1260 x: np.ndarray,
1261 y: np.ndarray,
1262 z: np.ndarray,
1263 f: np.ndarray) -> None:
1264 """Base writer for 1D, 2D, and 3D HDF5 files.
1265
1266 See Also
1267 --------
1268 :func:`wrhdf_1d`, :func:`wrhdf_2d`, :func:`wrhdf_3d`
1269 """
1270 h5file = h5.File(h5_filename, 'w')
1271
1272 # Create the dataset ('Data' is the name used by the PSI data).
1273 h5file.create_dataset("Data", data=f)
1274
1275 # Make sure the scales are desired by checking x type, which can
1276 # be None or None converted by np.asarray (have to trap separately)
1277 if x is None:
1278 x = np.array([], dtype=f.dtype)
1279 y = np.array([], dtype=f.dtype)
1280 z = np.array([], dtype=f.dtype)
1281 if x.size == 1 and x.item() is None:
1282 x = np.array([], dtype=f.dtype)
1283 y = np.array([], dtype=f.dtype)
1284 z = np.array([], dtype=f.dtype)
1285
1286 # Make sure scales are the same precision as data.
1287 x = x.astype(f.dtype)
1288 y = y.astype(f.dtype)
1289 z = z.astype(f.dtype)
1290
1291 # Get number of dimensions:
1292 ndims = np.ndim(f)
1293
1294 # Set the scales:
1295 for i in range(0, ndims):
1296 if i == 0 and len(x) != 0:
1297 dim = h5file.create_dataset("dim1", data=x)
1298 # h5file['Data'].dims.create_scale(dim,'dim1')
1299 dim.make_scale('dim1')
1300 h5file['Data'].dims[0].attach_scale(dim)
1301 h5file['Data'].dims[0].label = 'dim1'
1302 if i == 1 and len(y) != 0:
1303 dim = h5file.create_dataset("dim2", data=y)
1304 # h5file['Data'].dims.create_scale(dim,'dim2')
1305 dim.make_scale('dim2')
1306 h5file['Data'].dims[1].attach_scale(dim)
1307 h5file['Data'].dims[1].label = 'dim2'
1308 elif i == 2 and len(z) != 0:
1309 dim = h5file.create_dataset("dim3", data=z)
1310 # h5file['Data'].dims.create_scale(dim,'dim3')
1311 dim.make_scale('dim3')
1312 h5file['Data'].dims[2].attach_scale(dim)
1313 h5file['Data'].dims[2].label = 'dim3'
1314
1315 # Close the file:
1316 h5file.close()
1317
1318
1319def _wrhdf(hdf_filename: str,
1320 x: np.ndarray,
1321 y: np.ndarray,
1322 z: np.ndarray,
1323 f: np.ndarray) -> None:
1324 """Base writer for 1D, 2D, and 3D HDF4 files.
1325
1326 See Also
1327 --------
1328 :func:`wrhdf_1d`, :func:`wrhdf_2d`, :func:`wrhdf_3d`
1329 """
1330 if hdf_filename.endswith('h5'):
1331 return _wrh5(hdf_filename, x, y, z, f)
1332
1333 # Check for HDF4
1334 _except_no_pyhdf()
1335
1336 # Create an HDF file
1337 sd_id = h4.SD(hdf_filename, h4.SDC.WRITE | h4.SDC.CREATE | h4.SDC.TRUNC)
1338
1339 # Due to bug, need to only write 64-bit.
1340 f = f.astype(np.float64)
1341 ftype = h4.SDC.FLOAT64
1342
1343 # if f.dtype == np.float32:
1344 # ftype = h4.SDC.FLOAT32
1345 # elif f.dtype == np.float64:
1346 # ftype = h4.SDC.FLOAT64
1347
1348 # Create the dataset ('Data-Set-2' is the name used by the PSI data).
1349 sds_id = sd_id.create("Data-Set-2", ftype, f.shape)
1350
1351 # Get number of dimensions:
1352 ndims = np.ndim(f)
1353
1354 # Make sure the scales are desired by checking x type, which can
1355 # be None or None converted by np.asarray (have to trap separately)
1356 if x is None:
1357 x = np.array([], dtype=f.dtype)
1358 y = np.array([], dtype=f.dtype)
1359 z = np.array([], dtype=f.dtype)
1360 if x.size == 1 and x.item() is None:
1361 x = np.array([], dtype=f.dtype)
1362 y = np.array([], dtype=f.dtype)
1363 z = np.array([], dtype=f.dtype)
1364
1365 # Due to python hdf4 bug, need to use double scales only.
1366
1367 # NOTE: this bug is due to an inability of SWIG (which is used to generate
1368 # the C-to-python pyhdf bindings) to handle numpy types in the overloaded functions.
1369 # The only reason that np.float64 works is that it is equivalent to
1370 # the native python float type. This issue has been remedied below in the
1371 # write_hdf functions.
1372
1373 x = x.astype(np.float64)
1374 y = y.astype(np.float64)
1375 z = z.astype(np.float64)
1376
1377 # Set the scales (or don't if x is none or length zero)
1378 for i in range(0, ndims):
1379 dim = sds_id.dim(i)
1380 if i == 0 and len(x) != 0:
1381 if x.dtype == np.float32:
1382 stype = h4.SDC.FLOAT32
1383 elif x.dtype == np.float64:
1384 stype = h4.SDC.FLOAT64
1385 dim.setscale(stype, x)
1386 elif i == 1 and len(y) != 0:
1387 if y.dtype == np.float32:
1388 stype = h4.SDC.FLOAT32
1389 elif y.dtype == np.float64:
1390 stype = h4.SDC.FLOAT64
1391 dim.setscale(stype, y)
1392 elif i == 2 and len(z) != 0:
1393 if z.dtype == np.float32:
1394 stype = h4.SDC.FLOAT32
1395 elif z.dtype == np.float64:
1396 stype = h4.SDC.FLOAT64
1397 dim.setscale(stype, z)
1398
1399 # Write the data:
1400 sds_id.set(f)
1401
1402 # Close the dataset:
1403 sds_id.endaccess()
1404
1405 # Flush and close the HDF file:
1406 sd_id.end()
1407
1408
1409def _get_scales_nd_h5(ifile: Union[ Path, str], /,
1410 dimensionality: int,
1411 dataset_id: Optional[str] = None,
1412 ):
1413 with h5.File(ifile, 'r') as hdf:
1414 data = hdf[dataset_id or PSI_DATA_ID['h5']]
1415 ndim = data.ndim
1416 if ndim != dimensionality:
1417 err = f'Expected {dimensionality}D data, got {ndim}D data instead.'
1418 raise ValueError(err)
1419 scales = []
1420 for dim in data.dims:
1421 if dim:
1422 scales.append(dim[0][:])
1423 else:
1424 raise ValueError(f'Dimension has no scale associated with it.')
1425 return tuple(scales)
1426
1427
1428def _get_scales_nd_h4(ifile: Union[ Path, str], /,
1429 dimensionality: int,
1430 dataset_id: Optional[str] = None,
1431 ):
1432 hdf = h4.SD(str(ifile))
1433 data = hdf.select(dataset_id or PSI_DATA_ID['h4'])
1434 ndim = data.info()[1]
1435 if ndim != dimensionality:
1436 err = f'Expected {dimensionality}D data, got {ndim}D data instead.'
1437 raise ValueError(err)
1438 scales = []
1439 for k_, v_ in reversed(data.dimensions(full=1).items()):
1440 if v_[3]:
1441 scales.append(hdf.select(k_)[:])
1442 else:
1443 raise ValueError('Dimension has no scale associated with it.')
1444 return tuple(scales)
1445
1446
1447def _read_h5_meta(ifile: Union[Path, str], /,
1448 dataset_id: Optional[str] = None
1449 ):
1450 """HDF5 (.h5) version of :func:`read_hdf_meta`."""
1451 with h5.File(ifile, 'r') as hdf:
1452 # Raises KeyError if ``dataset_id`` not found
1453 # If ``dataset_id`` is None, get all non-scale :class:`h5.Dataset`s
1454 if dataset_id:
1455 datasets = (dataset_id, hdf[dataset_id]),
1456 else:
1457 datasets = ((k, v) for k, v in hdf.items() if not v.is_scale)
1458
1459 # One should avoid multiple calls to ``dimproxy[0]`` – *e.g.* ``dimproxy[0].dtype`` and
1460 # ``dimproxy[0].shape`` – because the __getitem__ method creates and returns a new
1461 # :class:`~h5.DimensionProxy` object each time it is called. [Does this matter? Probably not.]
1462 return [HdfDataMeta(name=k,
1463 type=v.dtype,
1464 shape=v.shape,
1465 scales=[HdfScaleMeta(name=dimproxy.label,
1466 type=dim.dtype,
1467 shape=dim.shape,
1468 imin=dim[0],
1469 imax=dim[-1])
1470 for dimproxy in v.dims if dimproxy and (dim := dimproxy[0])])
1471 for k, v in datasets]
1472
1473
1474def _read_h4_meta(ifile: Union[Path, str], /,
1475 dataset_id: Optional[str] = None
1476 ):
1477 """HDF4 (.hdf) version of :func:`read_hdf_meta`."""
1478 hdf = h4.SD(str(ifile))
1479 # Raises HDF4Error if ``dataset_id`` not found
1480 # If ``dataset_id`` is None, get all non-scale :class:`pyhdf.SD.SDS`s
1481 if dataset_id:
1482 datasets = (dataset_id, hdf.select(dataset_id)),
1483 else:
1484 datasets = ((k, hdf.select(k)) for k in hdf.datasets().keys() if not hdf.select(k).iscoordvar())
1485
1486 # The inner list comprehension differs in approach from the HDF5 version because calling
1487 # ``dimensions(full=1)`` on an :class:`~pyhdf.SD.SDS` returns a dictionary of dimension
1488 # dataset identifiers (keys) and tuples containing dimension metadata (values). Even if no
1489 # coordinate-variable datasets are defined, this dictionary is still returned; the only
1490 # indication that the datasets returned do not exist is that the "type" field (within the
1491 # tuple of dimension metadata) is set to 0.
1492
1493 # Also, one cannot avoid multiple calls to ``hdf.select(k_)`` within the inner list comprehension
1494 # because :class:`~pyhdf.SD.SDS` objects do not define a ``__bool__`` method, and the fallback
1495 # behavior of Python is to assess if the __len__ method returns a non-zero value (which, in
1496 # this case, always returns 0).
1497 return [HdfDataMeta(name=k,
1498 type=SDC_TYPE_CONVERSIONS[v.info()[3]],
1499 shape=_cast_shape_tuple(v.info()[2]),
1500 scales=[HdfScaleMeta(name=k_,
1501 type=SDC_TYPE_CONVERSIONS[v_[3]],
1502 shape=_cast_shape_tuple(v_[0]),
1503 imin=hdf.select(k_)[0],
1504 imax=hdf.select(k_)[-1])
1505 for k_, v_ in v.dimensions(full=1).items() if v_[3]])
1506 for k, v in datasets]
1507
1508
1509def _read_h5_rtp(ifile: Union[ Path, str], /):
1510 """HDF5 (.h5) version of :func:`read_rtp_meta`."""
1511 with h5.File(ifile, 'r') as hdf:
1512 return {k: (hdf[v].size, hdf[v][0], hdf[v][-1])
1513 for k, v in zip('rtp', PSI_SCALE_ID['h5'])}
1514
1515
1516def _read_h4_rtp(ifile: Union[ Path, str], /):
1517 """HDF4 (.hdf) version of :func:`read_rtp_meta`."""
1518 hdf = h4.SD(str(ifile))
1519 return {k: (hdf.select(v).info()[2], hdf.select(v)[0], hdf.select(v)[-1])
1520 for k, v in zip('ptr', PSI_SCALE_ID['h4'])}
1521
1522
1523def _read_h5_data(ifile: Union[Path, str], /,
1524 dataset_id: Optional[str] = None,
1525 return_scales: bool = True,
1526 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1527 with h5.File(ifile, 'r') as hdf:
1528 data = hdf[dataset_id or PSI_DATA_ID['h5']]
1529 dataset = data[:]
1530 if return_scales:
1531 scales = [dim[0][:] for dim in data.dims if dim]
1532 return dataset, *scales
1533 return dataset
1534
1535
1536def _read_h4_data(ifile: Union[Path, str], /,
1537 dataset_id: Optional[str] = None,
1538 return_scales: bool = True,
1539 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1540 hdf = h4.SD(str(ifile))
1541 data = hdf.select(dataset_id or PSI_DATA_ID['h4'])
1542 if return_scales:
1543 out = (data[:],
1544 *[hdf.select(k_)[:] for k_, v_ in reversed(data.dimensions(full=1).items()) if v_[3]])
1545 else:
1546 out = data[:]
1547 return out
1548
1549
1550def _read_h5_by_index(ifile: Union[Path, str], /,
1551 *xi: Union[int, Tuple[Union[int, None], Union[int, None]], None],
1552 dataset_id: Optional[str] = None,
1553 return_scales: bool = True,
1554 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1555 """ HDF5(.h5) version of :func:`read_hdf_by_index`."""
1556 with (h5.File(ifile, 'r') as hdf):
1557 data = hdf[dataset_id or PSI_DATA_ID['h5']]
1558 if len(xi) != data.ndim:
1559 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1560 slices = [_parse_index_inputs(slice_input) for slice_input in xi]
1561 dataset = data[tuple(reversed(slices))]
1562 if return_scales:
1563 scales = [dim[0][si] for si, dim in zip(slices, data.dims) if dim]
1564 return dataset, *scales
1565 return dataset
1566
1567def _read_h4_by_index(ifile: Union[Path, str], /,
1568 *xi: Union[int, Tuple[Union[int, None], Union[int, None]], None],
1569 dataset_id: Optional[str] = None,
1570 return_scales: bool = True,
1571 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1572 """HDF4(.hdf) version of :func:`read_hdf_by_index`."""
1573 hdf = h4.SD(str(ifile))
1574 data = hdf.select(dataset_id or PSI_DATA_ID['h4'])
1575 ndim = data.info()[1]
1576 if len(xi) != ndim:
1577 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1578 slices = [_parse_index_inputs(slice_input) for slice_input in xi]
1579 dataset = data[tuple(reversed(slices))]
1580 if return_scales:
1581 scales = [hdf.select(k_)[si] for si, (k_, v_) in zip(slices, reversed(data.dimensions(full=1).items())) if v_[3]]
1582 return dataset, *scales
1583 return dataset
1584
1585
1586def _read_h5_by_value(ifile: Union[Path, str], /,
1587 *xi: Union[float, Tuple[float, float], None],
1588 dataset_id: Optional[str] = None,
1589 return_scales: bool = True,
1590 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1591 """HDF5 (.h5) version of :func:`read_hdf_by_value`."""
1592 with (h5.File(ifile, 'r') as hdf):
1593 data = hdf[dataset_id or PSI_DATA_ID['h5']]
1594 if len(xi) != data.ndim:
1595 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1596 slices = []
1597 for dimproxy, value in zip(data.dims, xi):
1598 if dimproxy:
1599 slices.append(_parse_value_inputs(dimproxy[0], value))
1600 elif value is None:
1601 slices.append(slice(None))
1602 else:
1603 raise ValueError("Cannot slice by value on dimension without scales")
1604 dataset = data[tuple(reversed(slices))]
1605 if return_scales:
1606 scales = [dim[0][si] for si, dim in zip(slices, data.dims) if dim]
1607 return dataset, *scales
1608 return dataset
1609
1610
1611def _read_h4_by_value(ifile: Union[Path, str], /,
1612 *xi: Union[float, Tuple[float, float], None],
1613 dataset_id: Optional[str] = None,
1614 return_scales: bool = True,
1615 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1616 """HDF4 (.hdf) version of :func:`read_hdf_by_value`."""
1617 hdf = h4.SD(str(ifile))
1618 data = hdf.select(dataset_id or PSI_DATA_ID['h4'])
1619 ndim = data.info()[1]
1620 if len(xi) != ndim:
1621 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1622 slices = []
1623 for (k_, v_), value in zip(reversed(data.dimensions(full=1).items()), xi):
1624 if v_[3] != 0:
1625 slices.append(_parse_value_inputs(hdf.select(k_), value))
1626 elif value is None:
1627 slices.append(slice(None))
1628 else:
1629 raise ValueError("Cannot slice by value on dimension without scales")
1630 dataset = data[tuple(reversed(slices))]
1631 if return_scales:
1632 scales = [hdf.select(k_)[si] for si, (k_, v_) in zip(slices, reversed(data.dimensions(full=1).items())) if v_[3]]
1633 return dataset, *scales
1634 return dataset
1635
1636
1637def _read_h5_by_ivalue(ifile: Union[Path, str], /,
1638 *xi: Union[float, Tuple[float, float], None],
1639 dataset_id: Optional[str] = None,
1640 return_scales: bool = True,
1641 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1642 with (h5.File(ifile, 'r') as hdf):
1643 data = hdf[dataset_id or PSI_DATA_ID['h5']]
1644 if len(xi) != data.ndim:
1645 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1646 slices = [_parse_ivalue_inputs(*args) for args in zip(reversed(data.shape), xi)]
1647 dataset = data[tuple(reversed(slices))]
1648 if return_scales:
1649 scales = [np.arange(si.start or 0, si.stop or size) for si, size in zip(slices, reversed(data.shape))]
1650 return dataset, *scales
1651 return dataset
1652
1653
1654def _read_h4_by_ivalue(ifile: Union[Path, str], /,
1655 *xi: Union[float, Tuple[float, float], None],
1656 dataset_id: Optional[str] = None,
1657 return_scales: bool = True,
1658 ) -> Union[np.ndarray, Tuple[np.ndarray]]:
1659 hdf = h4.SD(str(ifile))
1660 data = hdf.select(dataset_id or PSI_DATA_ID['h4'])
1661 ndim, shape = data.info()[1], _cast_shape_tuple(data.info()[2])
1662 if len(xi) != ndim:
1663 raise ValueError(f"len(xi) must equal the number of scales for {dataset_id}")
1664 slices = [_parse_ivalue_inputs(*args) for args in zip(reversed(shape), xi)]
1665 dataset = data[tuple(reversed(slices))]
1666 if return_scales:
1667 scales = [np.arange(si.start or 0, si.stop or size) for si, size in zip(slices, reversed(shape))]
1668 return dataset, *scales
1669 return dataset
1670
1671
1672def _write_h4_data(ifile: Union[Path, str], /,
1673 data: np.ndarray,
1674 *scales: Iterable[np.ndarray],
1675 dataset_id: Optional[str] = None
1676 ) -> Path:
1677 dataid = dataset_id or PSI_DATA_ID['h4']
1678 h4file = h4.SD(str(ifile), h4.SDC.WRITE | h4.SDC.CREATE | h4.SDC.TRUNC)
1679 sds_id = h4file.create(dataid, NPTYPES_TO_SDCTYPES[data.dtype.name], data.shape)
1680
1681 if scales:
1682 for i, scale in enumerate(reversed(scales)):
1683 sds_id.dim(i).setscale(NPTYPES_TO_SDCTYPES[scale.dtype.name], scale.tolist())
1684
1685 sds_id.set(data)
1686 sds_id.endaccess()
1687 h4file.end()
1688
1689 return ifile
1690
1691
1692def _write_h5_data(ifile: Union[Path, str], /,
1693 data: np.ndarray,
1694 *scales: Iterable[np.ndarray],
1695 dataset_id: Optional[str] = None
1696 ) -> Path:
1697 dataid = dataset_id or PSI_DATA_ID['h5']
1698 with h5.File(ifile, "w") as h5file:
1699 h5file.create_dataset(dataid, data=data, dtype=data.dtype, shape=data.shape)
1700
1701 if scales:
1702 for i, scale in enumerate(scales):
1703 h5file.create_dataset(f"dim{i+1}", data=scale, dtype=scale.dtype, shape=scale.shape)
1704 h5file[dataid].dims[i].attach_scale(h5file[f"dim{i+1}"])
1705 h5file[dataid].dims[i].label = f"dim{i+1}"
1706
1707 return ifile
1708
1709
1710def _np_linear_interpolation(xi, scales, values):
1711 """
1712 Perform linear interpolation over one dimension.
1713
1714 Parameters
1715 ----------
1716 xi : list
1717 List of values or None for each dimension.
1718 scales : list
1719 List of scales (coordinate arrays) for each dimension.
1720 values : np.ndarray
1721 The data array to interpolate.
1722
1723 Returns
1724 -------
1725 np.ndarray
1726 The interpolated data.
1727
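    Notes
    -----
    With ``a`` the single fixed value, ``x0`` and ``x1`` the first two entries of the
    corresponding scale, and ``t = (a - x0) / (x1 - x0)``, the result is
    ``(1 - t)*f0 + t*f1``, where ``f0`` and ``f1`` are the data hyperplanes at index 0
    and 1 along the fixed dimension; values outside ``[x0, x1]`` extrapolate linearly.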
1728 """
1729 index0 = next((i for i, v in enumerate(xi) if v is not None), None)
1730 t = (xi[index0] - scales[index0][0])/(scales[index0][1] - scales[index0][0])
1731 f0 = [slice(None, None)]*values.ndim
1732 f1 = [slice(None, None)]*values.ndim
1733 f0[index0] = 0
1734 f1[index0] = 1
1735
1736 return (1 - t)*values[tuple(f0)] + t*values[tuple(f1)]


def _np_bilinear_interpolation(xi, scales, values):
    """
    Perform bilinear interpolation over two dimensions.

    Parameters
    ----------
    xi : list
        List of values or None for each dimension.
    scales : list
        List of scales (coordinate arrays) for each dimension.
    values : np.ndarray
        The data array to interpolate.

    Returns
    -------
    np.ndarray
        The interpolated data.

    Notes
    -----
    The scales and data along the two interpolated dimensions are assumed to
    have already been reduced to the two samples that bracket each target value.
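
    Examples
    --------
    For illustration, evaluating at the center of a unit cell (the mean of the
    four corner values):

    >>> import numpy as np
    >>> scales = [np.array([0.0, 1.0]), np.array([0.0, 1.0])]
    >>> values = np.array([[1.0, 2.0], [3.0, 4.0]])
    >>> float(_np_bilinear_interpolation([0.5, 0.5], scales, values))
    2.5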
    """
    index0, index1 = [i for i, v in enumerate(xi) if v is not None]
    t, u = [(xi[i] - scales[i][0])/(scales[i][1] - scales[i][0]) for i in (index0, index1)]

    f00 = [slice(None, None)]*values.ndim
    f10 = [slice(None, None)]*values.ndim
    f01 = [slice(None, None)]*values.ndim
    f11 = [slice(None, None)]*values.ndim
    f00[index0], f00[index1] = 0, 0
    f10[index0], f10[index1] = 1, 0
    f01[index0], f01[index1] = 0, 1
    f11[index0], f11[index1] = 1, 1

    return (
        (1 - t)*(1 - u)*values[tuple(f00)] +
        t*(1 - u)*values[tuple(f10)] +
        (1 - t)*u*values[tuple(f01)] +
        t*u*values[tuple(f11)]
    )


def _np_trilinear_interpolation(xi, scales, values):
    """
    Perform trilinear interpolation over three dimensions.

    Parameters
    ----------
    xi : list
        List of values or None for each dimension.
    scales : list
        List of scales (coordinate arrays) for each dimension.
    values : np.ndarray
        The data array to interpolate.

    Returns
    -------
    np.ndarray
        The interpolated data.

    Notes
    -----
    The scales and data along the three interpolated dimensions are assumed to
    have already been reduced to the two samples that bracket each target value.
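
    Examples
    --------
    For illustration, evaluating at the center of a unit cell (the mean of the
    eight corner values):

    >>> import numpy as np
    >>> scales = [np.array([0.0, 1.0])]*3
    >>> values = np.arange(8.0).reshape(2, 2, 2)
    >>> float(_np_trilinear_interpolation([0.5, 0.5, 0.5], scales, values))
    3.5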
    """
    index0, index1, index2 = [i for i, v in enumerate(xi) if v is not None]
    t, u, v = [(xi[i] - scales[i][0])/(scales[i][1] - scales[i][0]) for i in (index0, index1, index2)]

    f000 = [slice(None, None)]*values.ndim
    f100 = [slice(None, None)]*values.ndim
    f010 = [slice(None, None)]*values.ndim
    f110 = [slice(None, None)]*values.ndim
    f001 = [slice(None, None)]*values.ndim
    f101 = [slice(None, None)]*values.ndim
    f011 = [slice(None, None)]*values.ndim
    f111 = [slice(None, None)]*values.ndim

    f000[index0], f000[index1], f000[index2] = 0, 0, 0
    f100[index0], f100[index1], f100[index2] = 1, 0, 0
    f010[index0], f010[index1], f010[index2] = 0, 1, 0
    f110[index0], f110[index1], f110[index2] = 1, 1, 0
    f001[index0], f001[index1], f001[index2] = 0, 0, 1
    f101[index0], f101[index1], f101[index2] = 1, 0, 1
    f011[index0], f011[index1], f011[index2] = 0, 1, 1
    f111[index0], f111[index1], f111[index2] = 1, 1, 1

    # Reduce along the first interpolated axis (t), then the second (u), then the third (v).
    c00 = values[tuple(f000)]*(1 - t) + values[tuple(f100)]*t
    c10 = values[tuple(f010)]*(1 - t) + values[tuple(f110)]*t
    c01 = values[tuple(f001)]*(1 - t) + values[tuple(f101)]*t
    c11 = values[tuple(f011)]*(1 - t) + values[tuple(f111)]*t

    c0 = c00*(1 - u) + c10*u
    c1 = c01*(1 - u) + c11*u

    return c0*(1 - v) + c1*v


def _check_index_ranges(arr_size: int,
                        i0: Union[int, np.integer],
                        i1: Union[int, np.integer]
                        ) -> Tuple[int, int]:
    """
    Adjust index ranges to ensure they cover at least two indices.

    Parameters
    ----------
    arr_size : int
        The size of the array along the dimension.
    i0 : int
        The starting index.
    i1 : int
        The ending index.

    Returns
    -------
    Tuple[int, int]
        Adjusted starting and ending indices.

    Notes
    -----
    This function ensures that the range between `i0` and `i1` includes at least
    two indices for interpolation purposes.
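
    Examples
    --------
    For illustration, with an array of size 10:

    >>> _check_index_ranges(10, 4, 4)
    (3, 5)
    >>> _check_index_ranges(10, 0, 0)
    (0, 2)
    >>> _check_index_ranges(10, 10, 10)
    (8, 10)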
    """
    i0, i1 = int(i0), int(i1)
    if i0 == 0:
        return (i0, i1 + 2) if i1 == 0 else (i0, i1 + 1)
    elif i0 == arr_size:
        return i0 - 2, i1
    else:
        return i0 - 1, i1 + 1


def _cast_shape_tuple(input: Union[int, Iterable[int]]) -> tuple[int, ...]:
    """
    Cast an input to a tuple of integers.

    Parameters
    ----------
    input : int | Iterable[int]
        The input to cast.

    Returns
    -------
    tuple[int, ...]
        The input cast as a tuple of integers.

    Raises
    ------
    TypeError
        If the input is neither an integer nor an iterable of integers.
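
    Examples
    --------
    For illustration:

    >>> _cast_shape_tuple(5)
    (5,)
    >>> _cast_shape_tuple([128, 64])
    (128, 64)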
    """
    if isinstance(input, int):
        return (input,)
    elif isinstance(input, Iterable):
        return tuple(int(i) for i in input)
    else:
        raise TypeError("Input must be an integer or an iterable of integers.")


def _parse_index_inputs(input: Union[int, slice, Iterable[Union[int, None]], None]) -> slice:
    """
    Parse various slice input formats into a standard slice object.

    Parameters
    ----------
    input : int | slice | Iterable[Union[int, None]] | None
        The input to parse. An integer selects a single index, an iterable is
        interpreted as (start, stop) bounds, a slice is returned unchanged,
        and None selects the full dimension.

    Returns
    -------
    slice
        The parsed slice object.

    Raises
    ------
    TypeError
        If the input type is unsupported.
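
    Examples
    --------
    For illustration:

    >>> _parse_index_inputs(5)
    slice(5, 6, None)
    >>> _parse_index_inputs((10, 20))
    slice(10, 20, None)
    >>> _parse_index_inputs(None)
    slice(None, None, None)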
    """
    if isinstance(input, int):
        return slice(input, input + 1)
    elif isinstance(input, slice):
        # Pass slices through unchanged (the annotation allows them).
        return input
    elif isinstance(input, Iterable):
        return slice(*input)
    elif input is None:
        return slice(None)
    else:
        raise TypeError("Unsupported input type for slicing.")


def _parse_value_inputs(dimproxy, value, scale_exists: bool = True) -> slice:
    """
    Convert a scale value, or a (min, max) range of values, into an index slice.

    The scale is read from `dimproxy`, the requested value(s) are located with
    `np.searchsorted` (the scale is assumed to be monotonically increasing),
    and the index range is widened by `_check_index_ranges` so that it always
    brackets the requested value(s). None selects the full dimension.
    """
    if value is None:
        return slice(None)
    if not scale_exists:
        raise ValueError("Cannot parse value inputs when scale does not exist.")
    dim = dimproxy[:]
    if not isinstance(value, Iterable):
        insert_index = np.searchsorted(dim, value)
        return slice(*_check_index_ranges(dim.size, insert_index, insert_index))
    else:
        temp_range = list(value)
        # Open-ended ranges: None at either end means "from the start"/"to the end".
        if temp_range[0] is None:
            temp_range[0] = -np.inf
        if temp_range[-1] is None:
            temp_range[-1] = np.inf
        insert_indices = np.searchsorted(dim, temp_range)
        return slice(*_check_index_ranges(dim.size, *insert_indices))
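
# Illustrative behavior of _parse_value_inputs (the scale below is hypothetical):
# with r = np.array([1.0, 1.5, 2.0, 2.5, 3.0]),
#     _parse_value_inputs(r, 1.6)        -> slice(1, 3, None)
#     _parse_value_inputs(r, (1.6, 2.6)) -> slice(1, 5, None)
# i.e. index windows that bracket the requested value(s).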


def _parse_ivalue_inputs(dimsize, input: Union[int, float, Iterable[Union[int, float, None]], None]) -> slice:
    """
    Parse fractional index positions into a standard slice object.

    Parameters
    ----------
    dimsize : int
        The size of the array along the dimension.
    input : int | float | Iterable[Union[int, float, None]] | None
        The index position, or a (start, stop) range of positions, to parse;
        None selects the full dimension.

    Returns
    -------
    slice
        The parsed slice object, covering at least two indices.

    Raises
    ------
    TypeError
        If the input type is unsupported.
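
    Examples
    --------
    For illustration, with a dimension of size 100:

    >>> _parse_ivalue_inputs(100, 10.3)
    slice(10, 12, None)
    >>> _parse_ivalue_inputs(100, (10.3, 20.7))
    slice(10, 21, None)
    >>> _parse_ivalue_inputs(100, None)
    slice(None, None, None)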
    """
    if input is None:
        return slice(None)
    elif isinstance(input, (int, float)):
        i0, i1 = math.floor(input), math.ceil(input)
    elif isinstance(input, Iterable):
        i0, i1 = math.floor(input[0]), math.ceil(input[1])
    else:
        raise TypeError("Unsupported input type for slicing.")

    # Normalize the ordering and clamp to the valid index range; the swap/clamp
    # is repeated once in case the clamping itself inverted the ordering.
    if i0 > i1:
        i0, i1 = i1, i0
    i0, i1 = max(0, i0), min(dimsize - 1, i1)
    if i0 > i1:
        i0, i1 = i1, i0
    i0, i1 = max(0, i0), min(dimsize - 1, i1)
    # Guarantee a window of at least two indices (required by the interpolators).
    if (i1 - i0) < 2:
        return slice(i0, i1 + 2 - (i1 - i0))
    else:
        return slice(i0, i1)