gluonts.dataset.arrow.file module#

class gluonts.dataset.arrow.file.ArrowFile(path: pathlib.Path, _start: int = 0, _take: Union[int, NoneType] = None)[source]#

Bases: gluonts.dataset.arrow.file.File

property batch_offsets#
decoder: gluonts.dataset.arrow.dec.ArrowDecoder#
iter_batches()[source]#
location_for(idx)[source]#
metadata() Dict[str, str][source]#
path: pathlib.Path#
reader: pyarrow.ipc.RecordBatchFileReader#
property schema#
class gluonts.dataset.arrow.file.ArrowStreamFile(path: pathlib.Path, _start: int = 0, _take: Union[int, NoneType] = None)[source]#

Bases: gluonts.dataset.arrow.file.File

metadata() Dict[str, str][source]#
path: pathlib.Path#
class gluonts.dataset.arrow.file.File[source]#

Bases: object

SUFFIXES = {'.arrow', '.feather', '.parquet'}#
static infer(path: pathlib.Path) Union[gluonts.dataset.arrow.file.ArrowFile, gluonts.dataset.arrow.file.ArrowStreamFile, gluonts.dataset.arrow.file.ParquetFile][source]#

Return ArrowFile, ArrowStreamFile or ParquetFile by inspecting the provided path.

Arrow’s random-access format starts with ARROW1, so we peek into the provided file to check for it.

abstract metadata() Dict[str, str][source]#
class gluonts.dataset.arrow.file.ParquetFile(path: pathlib.Path, _start: int = 0, _take: Union[int, NoneType] = None, _row_group_sizes: List[int] = <factory>)[source]#

Bases: gluonts.dataset.arrow.file.File

location_for(idx)[source]#
metadata() Dict[str, str][source]#
path: pathlib.Path#
reader: pyarrow.parquet.core.ParquetFile#