diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml index 7ca805a..98e35c2 100644 --- a/.github/workflows/pre-commit.yaml +++ b/.github/workflows/pre-commit.yaml @@ -18,11 +18,11 @@ jobs: pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: token: ${{ secrets.GITHUB_TOKEN }} fetch-depth: 0 # get full git history - - uses: actions/setup-python@v3 + - uses: actions/setup-python@v5 with: cache: 'pip' - name: Install pre-commit @@ -30,7 +30,7 @@ jobs: pip install pre-commit - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v41 + uses: tj-actions/changed-files@v43 with: token: ${{ secrets.GITHUB_TOKEN }} - name: Run pre-commit diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 027d682..15dc856 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -6,32 +6,33 @@ on: - synchronize push: tags: - - 'v[0-9]+.[0-9]+.[0-9]+*' + - "v[0-9]+.[0-9]+.[0-9]+*" jobs: - test-suite: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [3.7, 3.8, 3.9, '3.10'] + python-version: [3.7, 3.8, 3.9, "3.10", "3.11"] os: [ubuntu-latest, macos-latest, windows-latest] steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} ${{ matrix.os }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt - pip install -r tests/requirements.txt - - name: Test with pytest - run: | - pytest - + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} ${{ matrix.os }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install -r tests/requirements.txt + - name: Test with pytest + 
run: | + pytest --cov-report= --cov=btrdb # suppress coverage report here + - name: Coverage Report + run: | + coverage report -m release: needs: @@ -39,7 +40,7 @@ jobs: if: startsWith(github.ref, 'refs/tags/') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create Release id: create_release uses: actions/create-release@v1 @@ -51,7 +52,6 @@ jobs: draft: false prerelease: false - deploy: needs: - release @@ -59,20 +59,20 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.8' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel build - - name: Build and publish - run: | + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel build + - name: Build and publish + run: | python -m build --sdist --wheel --outdir dist/ . 
- - name: Publish package - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_DEPLOYMENT_TOKEN }} + - name: Publish package + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_DEPLOYMENT_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6f327d1..2d8e501 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,26 +1,26 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace exclude: ^(setup.cfg|btrdb/grpcinterface) - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 24.3.0 hooks: - id: black-jupyter args: [--line-length=88] exclude: btrdb/grpcinterface/.*\.py - repo: https://github.com/pycqa/isort - rev: 5.11.5 + rev: 5.13.2 hooks: - id: isort name: isort (python) args: [--profile=black, --line-length=88] exclude: btrdb/grpcinterface/.*\.py - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 7.0.0 hooks: - id: flake8 args: [--config=setup.cfg] diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f6d54f..9d0c080 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,39 @@ # Changelog + +## 5.32.0 - YYYY-MM-DD + +### What's Changed +- Initial docstring overhaul and a new test for better documentation and test coverage. by @JustinGilmer in #82 +- Test new join functionality for improved data loading for windowed queries. by @JustinGilmer in #80 +- Improve `arrow_to_dataframe` function for handling large amounts of columns, enhancing performance and usability. by @Jefflinf in #73 +- Expand testing to include Python 3.11, ensuring compatibility and stability. by @JustinGilmer in #74 +- Update exception handling to better support `RpcErrors`, improving error management and debugging. 
by @JustinGilmer in #72 +- Introduce an option for specifying the schema of the returned data, allowing for more flexibility in data handling. by @TODO in #51 +- Remove non-required dependencies and migrate to 'data' optional dependency for a lighter package and easier installation. by @JustinGilmer in #71 +- New method to get first and last timestamps from `aligned_windows`, enhancing data analysis capabilities. by @Jefflinf in #70 +- Add `to_timedelta` method for `pointwidth` class, providing more options for time-based data manipulation. by @Jefflinf in #69 + +### Fixed +- Fix `NoneType` error for `earliest/latest` for empty streams, ensuring reliability and error handling. by @Jefflinf in #64 +- Correct integration tests where the time column is not automatically set as the index, improving test accuracy and reliability. by @JustinGilmer in #56 + +### Deprecated +- FutureWarning for `streams_in_collection` to return `StreamSet` in the future, preparing users for upcoming API changes. by @Jefflinf in #60 + +**Full Changelog**: [GitHub compare view](https://github.com/PingThingsIO/btrdb-python/compare/v5.31.0...v5.32.0) + + +## 5.31.0 +## What's Changed +* Release v5.30.2 by @youngale-pingthings in https://github.com/PingThingsIO/btrdb-python/pull/42 +* Have release script update pyproject.toml file by @youngale-pingthings in https://github.com/PingThingsIO/btrdb-python/pull/48 +* Provide option to sort the arrow tables by @justinGilmer in https://github.com/PingThingsIO/btrdb-python/pull/47 +* [sc-25841] Remove 4MB limit for gRPC message payloads by @justinGilmer in https://github.com/PingThingsIO/btrdb-python/pull/49 +* Update documentation for arrow methods by @justinGilmer in https://github.com/PingThingsIO/btrdb-python/pull/50 +* Update from staging by @justinGilmer in https://github.com/PingThingsIO/btrdb-python/pull/54 +* Sort tables by time by default for any `pyarrow` tables. by @justinGilmer in +* Fix deprecation warnings for pip installations. 
by @jleifnf in + ## 5.30.2 ### What's Changed * Update readthedocs to new yaml for testing. by @justinGilmer in https://github.com/PingThingsIO/btrdb-python/pull/40 diff --git a/btrdb/__init__.py b/btrdb/__init__.py index efcb686..1584b3b 100644 --- a/btrdb/__init__.py +++ b/btrdb/__init__.py @@ -52,18 +52,18 @@ def connect(conn_str=None, apikey=None, profile=None, shareable=False): Parameters ---------- conn_str: str, default=None - The address and port of the cluster to connect to, e.g. `192.168.1.1:4411`. - If set to None, will look in the environment variable `$BTRDB_ENDPOINTS` + The address and port of the cluster to connect to, e.g. ``192.168.1.1:4411``. + If set to None, will look in the environment variable ``$BTRDB_ENDPOINTS`` (recommended). apikey: str, default=None The API key used to authenticate requests (optional). If None, the key - is looked up from the environment variable `$BTRDB_API_KEY`. + is looked up from the environment variable ``$BTRDB_API_KEY``. profile: str, default=None The name of a profile containing the required connection information as found in the user's predictive grid credentials file - `~/.predictivegrid/credentials.yaml`. + ``~/.predictivegrid/credentials.yaml``. shareable: bool, default=False - Whether or not the connection can be "shared" in a distributed setting such + Whether the connection can be "shared" in a distributed setting such as Ray workers. If set to True, the connection can be serialized and sent to other workers so that data can be retrieved in parallel; **however**, this is less secure because it is possible for other users of the Ray cluster to @@ -74,6 +74,25 @@ def connect(conn_str=None, apikey=None, profile=None, shareable=False): db : BTrDB An instance of the BTrDB context to directly interact with the database. + Examples + -------- + This example looks for the env variables: ``BTRDB_ENDPOINTS`` and ``BTRDB_API_KEY``. 
+ + >>> conn = btrdb.connect() + + + + Connect to the platform by looking for the relevant platform profile + in ``${HOME}/.predictivegrid/credentials.yaml`` if the file is present. + + >>> conn = btrdb.connect(profile='test') + + + If you provide incorrect credentials, you will get an error. + + >>> conn = btrdb.connect(conn_str="192.168.1.1:4411", apikey="NONSENSICAL_API_KEY") + + """ # do not allow user to provide both address and profile if conn_str and profile: diff --git a/btrdb/conn.py b/btrdb/conn.py index a77eced..0e9b5fc 100644 --- a/btrdb/conn.py +++ b/btrdb/conn.py @@ -21,13 +21,14 @@ import re import uuid as uuidlib from concurrent.futures import ThreadPoolExecutor -from typing import List +from typing import List, Tuple, Union +from warnings import warn import certifi import grpc from grpc._cython.cygrpc import CompressionAlgorithm -from btrdb.exceptions import InvalidOperation, StreamNotFoundError, retry +from btrdb.exceptions import BTrDBError, InvalidOperation, StreamNotFoundError, retry from btrdb.stream import Stream, StreamSet from btrdb.utils.conversion import to_uuid from btrdb.utils.general import unpack_stream_descriptor @@ -54,11 +55,15 @@ def __init__(self, addrportstr, apikey=None): Parameters ---------- - addrportstr: str, required + addrportstr : str, required The address of the cluster to connect to, e.g 123.123.123:4411 - apikey: str, optional + apikey : str, optional The optional API key to authenticate requests + Notes + ----- + The ``btrdb.connect`` method is a helper function to make connecting to the platform easier + usually that will be sufficient for most users. 
""" addrport = addrportstr.split(":", 2) # 100MB size limit ~ 2500 streams for 5000 points with each point being 64bit @@ -196,6 +201,10 @@ class BTrDB(object): def __init__(self, endpoint): self.ep = endpoint + try: + _ = self.ep.info() + except Exception as err: + raise BTrDBError(f"Could not connect to the database, error message: {err}") self._executor = ThreadPoolExecutor() try: self._ARROW_ENABLED = _is_arrow_enabled(self.ep.info()) @@ -207,8 +216,8 @@ def __init__(self, endpoint): @retry def query( self, - stmt, - params=None, + stmt: str, + params: Union[Tuple[str], List[str]] = None, auto_retry=False, retries=5, retry_delay=3, @@ -220,23 +229,23 @@ def query( Parameters ---------- - stmt: str + stmt : str a SQL statement to be executed on the BTrDB metadata. Available tables are noted below. To sanitize inputs use a `$1` style parameter such as `select * from streams where name = $1 or name = $2`. - params: list or tuple + params : list or tuple a list of parameter values to be sanitized and interpolated into the SQL statement. Using parameters forces value/type checking and is considered a best practice at the very least. - auto_retry: bool, default: False + auto_retry : bool, default: False Whether to retry this request in the event of an error - retries: int, default: 5 + retries : int, default: 5 Number of times to retry this request if there is an error. Will be ignored if auto_retry is False - retry_delay: int, default: 3 + retry_delay : int, default: 3 initial time to wait before retrying function call if there is an error. Will be ignored if auto_retry is False - retry_backoff: int, default: 4 + retry_backoff : int, default: 4 Exponential factor by which the backoff increases between retries. Will be ignored if auto_retry is False @@ -249,10 +258,48 @@ def query( Notes ------- Parameters will be inserted into the SQL statement as noted by the - paramter number such as `$1`, `$2`, or `$3`. 
The `streams` table is + parameter number such as `$1`, `$2`, or `$3`. The `streams` table is available for `SELECT` statements only. See https://btrdb.readthedocs.io/en/latest/ for more info. + + The following are the queryable columns in the postgres ``streams`` table. + + +------------------+------------------------+-----------+ + | Column | Type | Nullable | + +==================+========================+===========+ + | uuid | uuid | not null | + +------------------+------------------------+-----------+ + | collection | character varying(256) | not null | + +------------------+------------------------+-----------+ + | name | character varying(256) | not null | + +------------------+------------------------+-----------+ + | unit | character varying(256) | not null | + +------------------+------------------------+-----------+ + | ingress | character varying(256) | not null | + +------------------+------------------------+-----------+ + | property_version | bigint | not null | + +------------------+------------------------+-----------+ + | annotations | hstore | | + +------------------+------------------------+-----------+ + + Examples + -------- + Count all streams in the platform. + + >>> conn = btrdb.connect() + >>> conn.query("SELECT COUNT(uuid) FROM streams") + [{'count': ...}] + + Count all streams in the collection ``foo/bar`` by passing in the variable as a parameter. + + >>> conn.query("SELECT COUNT(uuid) FROM streams WHERE collection=$1::text", params=["foo/bar"]) + [{'count': ...}] + + Count all streams in the platform that has a non-null entry for the metadata annotation ``foo``. + + >>> conn.query("SELECT COUNT(uuid) FROM streams WHERE annotations->$1::text IS NOT NULL", params=["foo"]) + [{'count': ...}] """ if params is None: params = list() @@ -266,18 +313,56 @@ def streams(self, *identifiers, versions=None, is_collection_prefix=False): """ Returns a StreamSet object with BTrDB streams from the supplied identifiers. 
If any streams cannot be found matching the identifier - than StreamNotFoundError will be returned. + then a ``StreamNotFoundError`` will be returned. Parameters ---------- - identifiers: str or UUID + identifiers : str or UUID a single item or iterable of items which can be used to query for - streams. identiers are expected to be UUID as string, UUID as UUID, + streams. Identifiers are expected to be UUID as string, UUID as UUID, or collection/name string. - versions: list[int] + versions : list[int] a single or iterable of version numbers to match the identifiers + is_collection_prefix : bool, default=False + If providing a collection string, is that string just a prefix, or the entire collection name? + This will impact how many streams are returned. + + + Returns + ------- + :class:`StreamSet` + Collection of streams. + + Examples + -------- + With a sequence of uuids. + + >>> conn = btrdb.connect() + >>> conn.streams(identifiers=list_of_uuids) + + + With a sequence of uuids and version numbers. + Here we are using version 0 to use the latest data points. + + >>> conn.streams(identifiers=list_of_uuids, versions=[0 for _ in list_of_uuids]) + + + Filtering by ``collection`` prefix ``"foo"`` where multiple collections exist like the following: + ``foo/bar``, ``foo/baz``, ``foo/bar/new``, and ``foo``. + If we set `is_collection_prefix`` to ``True``, this will return all streams that exist in the collections defined above. + It is similar to a regex pattern ``^foo.*`` for matching purposes. + + >>> conn.streams(identifiers="foo", is_collection_prefix=True) + + + If you set ``is_collection_prefix`` to ``False``, this will assume that the string identifier you provide is the full collection name. 
+ Matching like the regex here: ``^foo`` + + >>> conn.streams(identifiers="foo", is_collection_prefix=False) + + """ if versions is not None and not isinstance(versions, list): raise TypeError("versions argument must be of type list") @@ -337,7 +422,7 @@ def stream_from_uuid(self, uuid): Parameters ---------- - uuid: UUID + uuid : UUID The uuid of the requested stream. Returns @@ -345,6 +430,16 @@ def stream_from_uuid(self, uuid): Stream instance of Stream class or None + Examples + -------- + + >>> import btrdb + >>> conn = btrdb.connect() + >>> uuid = "f98f4b4e-9fab-46b5-8a80-f282059d69b1" + >>> stream = conn.stream_from_uuid(uuid) + >>> stream + + """ return Stream(self, to_uuid(uuid)) @@ -365,23 +460,23 @@ def create( Parameters ---------- - uuid: UUID, required + uuid : UUID, required The uuid of the requested stream. - collection: str, required + collection : str, required The collection string prefix that the stream will belong to. - tags: dict, required - The tags-level immutable metadata key:value pairs. - annotations: dict, optional + tags : dict, required + The tags-level metadata key:value pairs. + annotations : dict, optional The mutable metadata of the stream, key:value pairs - auto_retry: bool, default: False + auto_retry : bool, default: False Whether to retry this request in the event of an error - retries: int, default: 5 + retries : int, default: 5 Number of times to retry this request if there is an error. Will be ignored if auto_retry is False - retry_delay: int, default: 3 + retry_delay : int, default: 3 initial time to wait before retrying function call if there is an error. Will be ignored if auto_retry is False - retry_backoff: int, default: 4 + retry_backoff : int, default: 4 Exponential factor by which the backoff increases between retries. 
Will be ignored if auto_retry is False @@ -389,6 +484,18 @@ def create( ------- Stream instance of Stream class + + + Examples + -------- + >>> import btrdb + >>> from uuid import uuid4 # this generates a random uuid + >>> conn = btrdb.connect() + >>> collection = "new/stream/collection" + >>> tags = {"name":"foo", "unit":"V"} + >>> annotations = {"bar": "baz"} + >>> s = conn.create(uuid=uuid4(), tags=tags, annotations=annotations, collection=collection) + """ if tags is None: @@ -405,17 +512,28 @@ def create( collection=collection, tags=tags.copy(), annotations=annotations.copy(), - property_version=0, + property_version=1, ) def info(self): """ - Returns information about the connected BTrDB server. + Returns information about the platform proxy server. Returns ------- dict - server connection and status information + Proxy server connection and status information + + Examples + -------- + >>> conn = btrdb.connect() + >>> conn.info() + { + .. 'majorVersion': 5, + .. 'minorVersion': 8, + .. 'build': ..., + .. 'proxy': ..., + } """ info = self.ep.info() @@ -433,13 +551,27 @@ def list_collections(self, starts_with=""): Parameters ---------- - starts_with: str, optional, default = '' + starts_with : str, optional, default: '' Filter collections that start with the string provided, if none passed, will list all collections. Returns ------- collections: List[str] + Examples + -------- + + Assuming we have the following collections in the platform: + ``foo``, ``bar``, ``foo/baz``, ``bar/baz`` + + >>> conn = btrdb.connect() + >>> conn.list_collections().sort() + ["bar", "bar/baz", "foo", "foo/bar"] + + >>> conn.list_collections(starts_with="foo") + ["foo", "foo/bar"] + + """ return [c for some in self.ep.listCollections(starts_with) for c in some] @@ -464,13 +596,25 @@ def list_unique_annotations(self, collection=None): Returns a list of annotation keys used in a given collection prefix. 
Parameters - ------- - collection: str + ---------- + collection : str Prefix of the collection to filter. Returns ------- - annotations: list[str] + annotations : list[str] + + Notes + ----- + This query treats the ``collection`` string as a prefix, so ``collection="foo"`` will match with the following wildcard syntax ``foo%``. + If you only want to filter for a single collection, you will need to provide the full collection, if there are other collections + that match the ``foo%`` pattern, you might need to use a custom SQL query using ``conn.query``. + + Examples + -------- + >>> conn.list_unique_annotations(collection="sunshine/PMU1") + ['foo', 'location', 'impedance'] + """ return self._list_unique_tags_annotations("annotations", collection) @@ -479,13 +623,28 @@ def list_unique_names(self, collection=None): Returns a list of names used in a given collection prefix. Parameters - ------- - collection: str + ---------- + collection : str Prefix of the collection to filter. Returns ------- - names: list[str] + names : list[str] + + Examples + -------- + Can specify a full ``collection`` name. + + >>> conn.list_unique_names(collection="sunshine/PMU1") + ['C1ANG', 'C1MAG', 'C2ANG', 'C2MAG', 'C3ANG', 'C3MAG', 'L1ANG', 'L1MAG', 'L2ANG', 'L2MAG', 'L3ANG', 'L3MAG', 'LSTATE'] + + And also provide a ``collection`` prefix. + + >>> conn.list_unique_names(collection="sunshine/") + ['C1ANG', 'C1MAG', 'C2ANG', 'C2MAG', 'C3ANG', 'C3MAG', 'L1ANG', 'L1MAG', 'L2ANG', 'L2MAG', 'L3ANG', 'L3MAG', 'LSTATE'] + + + """ return self._list_unique_tags_annotations("name", collection) @@ -494,13 +653,21 @@ def list_unique_units(self, collection=None): Returns a list of units used in a given collection prefix. Parameters - ------- - collection: str + ---------- + collection : str Prefix of the collection to filter. 
Returns ------- - units: list[str] + units : list[str] + + + Examples + -------- + + >>> conn.list_unique_units(collection="sunshine/PMU1") + ['amps', 'deg', 'mask', 'volts'] + """ return self._list_unique_tags_annotations("unit", collection) @@ -523,31 +690,54 @@ def streams_in_collection( Parameters ---------- - collection: str + collection : str collections to use when searching for streams, case sensitive. - is_collection_prefix: bool + is_collection_prefix : bool Whether the collection is a prefix. - tags: Dict[str, str] + tags : Dict[str, str] The tags to identify the stream. - annotations: Dict[str, str] + annotations : Dict[str, str] The annotations to identify the stream. - auto_retry: bool, default: False + auto_retry : bool, default: False Whether to retry this request in the event of an error - retries: int, default: 5 + retries : int, default: 5 Number of times to retry this request if there is an error. Will be ignored if auto_retry is False - retry_delay: int, default: 3 + retry_delay : int, default: 3 initial time to wait before retrying function call if there is an error. Will be ignored if auto_retry is False - retry_backoff: int, default: 4 + retry_backoff : int, default: 4 Exponential factor by which the backoff increases between retries. Will be ignored if auto_retry is False Returns ------ - list - A list of stream objects found with the provided search arguments. + list[Stream] + A list of ``Stream`` objects found with the provided search arguments. + + + .. note:: + + In a future release, the default return value of this function will be a ``StreamSet`` + + Examples + -------- + >>> conn = btrdb.connect() + >>> conn.streams_in_collection(collection="foo", is_collection_prefix=True) + [, , ] + + >>> conn.streams_in_collection(collection="foo", is_collection_prefix=False) + [, ] + + >>> conn.streams_in_collection(collection="foo", + ... 
is_collection_prefix=False, tags={"unit":"Volts"}) + [] + + >>> conn.streams_in_collection(collection="foo", + ... is_collection_prefix=False, tags={"unit":"UNKNOWN"}) + [] """ result = [] @@ -578,7 +768,12 @@ def streams_in_collection( property_version=desc.propertyVersion, ) ) - + # TODO: In future release update this method to return a streamset object. + warn( + "StreamSet will be the default return object for ``streams_in_collection`` in a future release.", + FutureWarning, + stacklevel=2, + ) return result @retry @@ -592,21 +787,21 @@ def collection_metadata( ): """ Gives statistics about metadata for collections that match a - prefix. + ``prefix``. Parameters ---------- - prefix: str, required + prefix : str, required A prefix of the collection names to look at - auto_retry: bool, default: False + auto_retry : bool, default: False Whether to retry this request in the event of an error - retries: int, default: 5 + retries : int, default: 5 Number of times to retry this request if there is an error. Will be ignored if auto_retry is False - retry_delay: int, default: 3 + retry_delay : int, default: 3 initial time to wait before retrying function call if there is an error. Will be ignored if auto_retry is False - retry_backoff: int, default: 4 + retry_backoff : int, default: 4 Exponential factor by which the backoff increases between retries. Will be ignored if auto_retry is False @@ -616,6 +811,16 @@ def collection_metadata( A tuple of dictionaries containing metadata on the streams in the provided collection. + Examples + -------- + >>> conn.collection_metadata("sunshine/PMU1") + ({'name': 0, 'unit': 0, 'ingress': 0, 'distiller': 0}, + .. {'foo': 1, 'impedance': 12, 'location': 12}) + + >>> conn.collection_metadata("sunshine/") + ({'name': 0, 'unit': 0, 'ingress': 0, 'distiller': 0}, + .. 
{'foo': 1, 'impedance': 72, 'location': 72}) + """ ep = self.ep tags, annotations = ep.getMetadataUsage(prefix) diff --git a/btrdb/endpoint.py b/btrdb/endpoint.py index 79a3b72..c632bbb 100644 --- a/btrdb/endpoint.py +++ b/btrdb/endpoint.py @@ -28,7 +28,7 @@ import typing import uuid -from btrdb.exceptions import BTrDBError, check_proto_stat, error_handler +from btrdb.exceptions import check_proto_stat, error_handler from btrdb.grpcinterface import btrdb_pb2, btrdb_pb2_grpc from btrdb.point import RawPoint from btrdb.utils.general import unpack_stream_descriptor @@ -38,6 +38,8 @@ except ImportError: pa = None +_ARROW_IMPORT_MSG = """Package `pyarrow` required, please pip install.""" + class Endpoint(object): """Server endpoint where we make specific requests.""" @@ -55,9 +57,21 @@ def rawValues(self, uu, start, end, version=0): yield result.values, result.versionMajor @error_handler - def arrowRawValues(self, uu, start, end, version=0): - params = btrdb_pb2.RawValuesParams( - uuid=uu.bytes, start=start, end=end, versionMajor=version + def arrowRawValues(self, uu, start, end, version=0, schema=None): + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) + templateBytes = b"" + if schema is not None: + byte_io = io.BytesIO() + with pa.ipc.new_stream(sink=byte_io, schema=schema) as _: + pass + templateBytes = byte_io.getvalue() + params = btrdb_pb2.ArrowRawValuesParams( + uuid=uu.bytes, + start=start, + end=end, + versionMajor=version, + templateBytes=templateBytes, ) for result in self.stub.ArrowRawValues(params): check_proto_stat(result.stat) @@ -65,13 +79,24 @@ def arrowRawValues(self, uu, start, end, version=0): yield reader.read_all(), result.versionMajor @error_handler - def arrowMultiValues(self, uu_list, start, end, version_list, snap_periodNS): + def arrowMultiValues( + self, uu_list, start, end, version_list, snap_periodNS=None, schema=None + ): + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) + templateBytes = b"" + if schema is not None: + byte_io 
= io.BytesIO() + with pa.ipc.new_stream(sink=byte_io, schema=schema) as _: + pass + templateBytes = byte_io.getvalue() params = btrdb_pb2.ArrowMultiValuesParams( uuid=[uu.bytes for uu in uu_list], start=start, end=end, versionMajor=[ver for ver in version_list], snapPeriodNs=int(snap_periodNS), + templateBytes=templateBytes, ) for result in self.stub.ArrowMultiValues(params): check_proto_stat(result.stat) @@ -80,6 +105,8 @@ def arrowMultiValues(self, uu_list, start, end, version_list, snap_periodNS): @error_handler def arrowInsertValues(self, uu: uuid.UUID, values: pa.Table, policy: str): + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) policy_map = { "never": btrdb_pb2.MergePolicy.NEVER, "equal": btrdb_pb2.MergePolicy.EQUAL, @@ -115,6 +142,8 @@ def alignedWindows(self, uu, start, end, pointwidth, version=0): @error_handler def arrowAlignedWindows(self, uu, start, end, pointwidth, version=0): + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) params = btrdb_pb2.AlignedWindowsParams( uuid=uu.bytes, start=start, @@ -143,6 +172,8 @@ def windows(self, uu, start, end, width, depth, version=0): @error_handler def arrowWindows(self, uu, start, end, width, depth, version=0): + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) params = btrdb_pb2.WindowsParams( uuid=uu.bytes, start=start, @@ -386,3 +417,25 @@ def sql_query(self, stmt, params: typing.List): for page in self.stub.SQLQuery(request): check_proto_stat(page.stat) yield page.SQLQueryRow + + @error_handler + def subscribe(self, update_queue): + def updates(): + while True: + update = update_queue.get() + if update is None: + return + (to_add, to_remove) = update + if len(to_add) != 0: + yield btrdb_pb2.SubscriptionUpdate( + op=0, uuid=[uu.bytes for uu in to_add] + ) + if len(to_remove) != 0: + yield btrdb_pb2.SubscriptionUpdate( + op=1, uuid=[uu.bytes for uu in to_remove] + ) + + for response in self.stub.Subscribe(updates()): + check_proto_stat(response.stat) + with 
pa.ipc.open_stream(response.arrowBytes) as reader: + yield uuid.UUID(bytes=response.uuid), reader.read_all() diff --git a/btrdb/exceptions.py b/btrdb/exceptions.py index 9777679..8507f7c 100644 --- a/btrdb/exceptions.py +++ b/btrdb/exceptions.py @@ -117,18 +117,21 @@ def handle_grpc_error(err): Parameters ---------- - err: grpc.RpcError + err: Union[grpc.RpcError, btrdb.BTrDBError] """ details = err.details() - if details == "[404] stream does not exist": - raise StreamNotFoundError("Stream not found with provided uuid") from None - elif details == "failed to connect to all addresses": - raise ConnectionError("Failed to connect to BTrDB") from None - elif any(str(e) in err.details() for e in BTRDB_SERVER_ERRORS): - raise BTRDBServerError("An error has occured with btrdb-server") from None - elif str(err.code()) == "StatusCode.PERMISSION_DENIED": - raise PermissionDenied(details) from None - raise BTrDBError(details) from None + if isinstance(err, BTrDBError): + if details == "[404] stream does not exist": + raise StreamNotFoundError("Stream not found with provided uuid") from None + elif details == "failed to connect to all addresses": + raise ConnectionError("Failed to connect to BTrDB") from None + elif any(str(e) in err.details() for e in BTRDB_SERVER_ERRORS): + raise BTRDBServerError("An error has occured with btrdb-server") from None + elif str(err.code()) == "StatusCode.PERMISSION_DENIED": + raise PermissionDenied(details) from None + raise BTrDBError(details) from None + else: + raise err def check_proto_stat(stat): @@ -145,7 +148,7 @@ def check_proto_stat(stat): if code in BTRDB_ERRORS: raise BTRDB_ERRORS[code](stat.msg) elif code in BTRDB_SERVER_ERRORS: - raise BTRDBServerError(stat.msg) + raise BTRDBServerError(str(code) + ": " + stat.msg) raise BTrDBError(stat.msg) diff --git a/btrdb/grpcinterface/btrdb.proto b/btrdb/grpcinterface/btrdb.proto index 884a71c..1e147d6 100644 --- a/btrdb/grpcinterface/btrdb.proto +++ b/btrdb/grpcinterface/btrdb.proto @@ -4,7 
+4,7 @@ package v5api; service BTrDB { rpc RawValues(RawValuesParams) returns (stream RawValuesResponse); - rpc ArrowRawValues(RawValuesParams) returns (stream ArrowRawValuesResponse); + rpc ArrowRawValues(ArrowRawValuesParams) returns (stream ArrowRawValuesResponse); rpc ArrowMultiValues(ArrowMultiValuesParams) returns (stream ArrowMultiValuesResponse); rpc AlignedWindows(AlignedWindowsParams) returns (stream AlignedWindowsResponse); rpc ArrowAlignedWindows(AlignedWindowsParams) returns (stream ArrowAlignedWindowsResponse); @@ -28,6 +28,7 @@ service BTrDB { rpc GetMetadataUsage(MetadataUsageParams) returns (MetadataUsageResponse); rpc GenerateCSV(GenerateCSVParams) returns (stream GenerateCSVResponse); rpc SQLQuery(SQLQueryParams) returns (stream SQLQueryResponse); + rpc Subscribe(stream SubscriptionUpdate) returns (stream SubscriptionResp); //rpc SetCompactionConfig(SetCompactionConfigParams) returns (SetCompactionConfigResponse); //rpc GetCompactionConfig(GetCompactionConfigParams) returns (GetCompactionConfigResponse); } @@ -44,6 +45,13 @@ message RawValuesResponse { uint64 versionMinor = 3; repeated RawPoint values = 4; } +message ArrowRawValuesParams { + bytes uuid = 1; + sfixed64 start = 2; + sfixed64 end = 3; + uint64 versionMajor = 4; + bytes templateBytes = 5; +} message ArrowRawValuesResponse { Status stat = 1; uint64 versionMajor = 2; @@ -56,6 +64,7 @@ message ArrowMultiValuesParams { sfixed64 start = 3; sfixed64 end = 4; int64 snapPeriodNs = 5; + bytes templateBytes = 6; } message ArrowMultiValuesResponse { Status stat = 1; @@ -426,3 +435,19 @@ message ReducedResolutionRange { int64 End = 2; uint32 Resolution = 3; } + +enum SubscriptionUpdateOp { + ADD_UUIDS = 0; + REMOVE_UUIDS = 1; +} + +message SubscriptionUpdate { + SubscriptionUpdateOp op = 1; + repeated bytes uuid = 2; +} + +message SubscriptionResp { + Status stat = 1; + bytes uuid = 2; + bytes arrowBytes = 3; +} diff --git a/btrdb/grpcinterface/btrdb_pb2.py b/btrdb/grpcinterface/btrdb_pb2.py 
index bed2b0d..9dcef1a 100644 --- a/btrdb/grpcinterface/btrdb_pb2.py +++ b/btrdb/grpcinterface/btrdb_pb2.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: btrdb.proto +# Protobuf Python Version: 4.25.0 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -13,152 +14,159 @@ +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x62trdb.proto\x12\x05v5api\"Q\n\x0fRawValuesParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\"}\n\x11RawValuesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x1f\n\x06values\x18\x04 \x03(\x0b\x32\x0f.v5api.RawPoint\"m\n\x14\x41rrowRawValuesParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\x12\x15\n\rtemplateBytes\x18\x05 \x01(\x0c\"u\n\x16\x41rrowRawValuesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"\x85\x01\n\x16\x41rrowMultiValuesParams\x12\x0c\n\x04uuid\x18\x01 \x03(\x0c\x12\x14\n\x0cversionMajor\x18\x02 \x03(\x04\x12\r\n\x05start\x18\x03 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x10\x12\x14\n\x0csnapPeriodNs\x18\x05 \x01(\x03\x12\x15\n\rtemplateBytes\x18\x06 \x01(\x0c\"K\n\x18\x41rrowMultiValuesResponse\x12\x1b\n\x04stat\x18\x01 
\x01(\x0b\x32\r.v5api.Status\x12\x12\n\narrowBytes\x18\x02 \x01(\x0c\"*\n\x0bRawPointVec\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\r\n\x05value\x18\x02 \x03(\x01\"j\n\x14\x41lignedWindowsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\x12\x12\n\npointWidth\x18\x05 \x01(\r\"\x83\x01\n\x16\x41lignedWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12 \n\x06values\x18\x04 \x03(\x0b\x32\x10.v5api.StatPoint\"z\n\x1b\x41rrowAlignedWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"m\n\rWindowsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\x12\r\n\x05width\x18\x05 \x01(\x04\x12\r\n\x05\x64\x65pth\x18\x06 \x01(\r\"|\n\x0fWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12 \n\x06values\x18\x04 \x03(\x0b\x32\x10.v5api.StatPoint\"s\n\x14\x41rrowWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"h\n\x10StreamInfoParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x13\n\x0bomitVersion\x18\x02 \x01(\x08\x12\x16\n\x0eomitDescriptor\x18\x03 \x01(\x08\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"\x8a\x01\n\x12StreamInfoResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12+\n\ndescriptor\x18\x04 
\x01(\x0b\x32\x17.v5api.StreamDescriptor\"\x98\x01\n\x10StreamDescriptor\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x12\n\ncollection\x18\x02 \x01(\t\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x17\n\x0fpropertyVersion\x18\x05 \x01(\x04\"\x82\x01\n\x1aSetStreamAnnotationsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x1f\n\x17\x65xpectedPropertyVersion\x18\x02 \x01(\x04\x12#\n\x07\x63hanges\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x10\n\x08removals\x18\x04 \x03(\t\";\n\x1cSetStreamAnnotationsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"\x8a\x01\n\x13SetStreamTagsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x1f\n\x17\x65xpectedPropertyVersion\x18\x02 \x01(\x04\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 \x03(\t\"4\n\x15SetStreamTagsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"{\n\x0c\x43reateParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x12\n\ncollection\x18\x02 \x01(\t\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\"-\n\x0e\x43reateResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"@\n\x13MetadataUsageParams\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"y\n\x15MetadataUsageResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x1d\n\x04tags\x18\x02 \x03(\x0b\x32\x0f.v5api.KeyCount\x12$\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32\x0f.v5api.KeyCount\"&\n\x08KeyCount\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x04\"B\n\x15ListCollectionsParams\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"K\n\x17ListCollectionsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x13\n\x0b\x63ollections\x18\x02 
\x03(\t\"\xab\x01\n\x13LookupStreamsParams\x12\x12\n\ncollection\x18\x01 \x01(\t\x12\x1a\n\x12isCollectionPrefix\x18\x02 \x01(\x08\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"^\n\x15LookupStreamsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12(\n\x07results\x18\x02 \x03(\x0b\x32\x17.v5api.StreamDescriptor\"S\n\rNearestParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04time\x18\x02 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x03 \x01(\x04\x12\x10\n\x08\x62\x61\x63kward\x18\x04 \x01(\x08\"z\n\x0fNearestResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x1e\n\x05value\x18\x04 \x01(\x0b\x32\x0f.v5api.RawPoint\"U\n\rChangesParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x11\n\tfromMajor\x18\x02 \x01(\x04\x12\x0f\n\x07toMajor\x18\x03 \x01(\x04\x12\x12\n\nresolution\x18\x04 \x01(\r\"\x7f\n\x0f\x43hangesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12#\n\x06ranges\x18\x04 \x03(\x0b\x32\x13.v5api.ChangedRange\"#\n\tRoundSpec\x12\x0e\n\x04\x62its\x18\x02 \x01(\x05H\x00\x42\x06\n\x04spec\"\x99\x01\n\x0cInsertParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04sync\x18\x02 \x01(\x08\x12(\n\x0cmerge_policy\x18\x04 \x01(\x0e\x32\x12.v5api.MergePolicy\x12\"\n\x08rounding\x18\x05 \x01(\x0b\x32\x10.v5api.RoundSpec\x12\x1f\n\x06values\x18\x03 \x03(\x0b\x32\x0f.v5api.RawPoint\"\x91\x01\n\x11\x41rrowInsertParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04sync\x18\x02 \x01(\x08\x12(\n\x0cmerge_policy\x18\x03 \x01(\x0e\x32\x12.v5api.MergePolicy\x12\"\n\x08rounding\x18\x04 \x01(\x0b\x32\x10.v5api.RoundSpec\x12\x12\n\narrowBytes\x18\x05 \x01(\x0c\"Y\n\x0eInsertResponse\x12\x1b\n\x04stat\x18\x01 
\x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\"8\n\x0c\x44\x65leteParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\"Y\n\x0e\x44\x65leteResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\"\x0c\n\nInfoParams\"\xa2\x01\n\x0cInfoResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x19\n\x04mash\x18\x02 \x01(\x0b\x32\x0b.v5api.Mash\x12\x14\n\x0cmajorVersion\x18\x03 \x01(\r\x12\x14\n\x0cminorVersion\x18\x04 \x01(\r\x12\r\n\x05\x62uild\x18\x05 \x01(\t\x12\x1f\n\x05proxy\x18\x06 \x01(\x0b\x32\x10.v5api.ProxyInfo\"#\n\tProxyInfo\x12\x16\n\x0eproxyEndpoints\x18\x01 \x03(\t\"1\n\x11\x46\x61ultInjectParams\x12\x0c\n\x04type\x18\x01 \x01(\x04\x12\x0e\n\x06params\x18\x02 \x01(\x0c\">\n\x13\x46\x61ultInjectResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\n\n\x02rv\x18\x02 \x01(\x0c\"\x1b\n\x0b\x46lushParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"X\n\rFlushResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\" \n\x10ObliterateParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"1\n\x12ObliterateResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"\'\n\x08RawPoint\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\r\n\x05value\x18\x02 \x01(\x01\"`\n\tStatPoint\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\x0b\n\x03min\x18\x02 \x01(\x01\x12\x0c\n\x04mean\x18\x03 \x01(\x01\x12\x0b\n\x03max\x18\x04 \x01(\x01\x12\r\n\x05\x63ount\x18\x05 \x01(\x06\x12\x0e\n\x06stddev\x18\x06 \x01(\x01\"*\n\x0c\x43hangedRange\x12\r\n\x05start\x18\x01 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x10\">\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12\x19\n\x04mash\x18\x03 
\x01(\x0b\x32\x0b.v5api.Mash\"\x98\x01\n\x04Mash\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12\x0e\n\x06leader\x18\x02 \x01(\t\x12\x16\n\x0eleaderRevision\x18\x03 \x01(\x03\x12\x13\n\x0btotalWeight\x18\x04 \x01(\x03\x12\x0f\n\x07healthy\x18\x05 \x01(\x08\x12\x10\n\x08unmapped\x18\x06 \x01(\x01\x12\x1e\n\x07members\x18\x07 \x03(\x0b\x32\r.v5api.Member\"\xc3\x01\n\x06Member\x12\x0c\n\x04hash\x18\x01 \x01(\r\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\n\n\x02up\x18\x03 \x01(\x08\x12\n\n\x02in\x18\x04 \x01(\x08\x12\x0f\n\x07\x65nabled\x18\x05 \x01(\x08\x12\r\n\x05start\x18\x06 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x07 \x01(\x03\x12\x0e\n\x06weight\x18\x08 \x01(\x03\x12\x16\n\x0ereadPreference\x18\t \x01(\x01\x12\x15\n\rhttpEndpoints\x18\n \x01(\t\x12\x15\n\rgrpcEndpoints\x18\x0b \x01(\t\"8\n\x0bKeyOptValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x03val\x18\x02 \x01(\x0b\x32\x0f.v5api.OptValue\"\x19\n\x08OptValue\x12\r\n\x05value\x18\x01 \x01(\t\"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"?\n\x0fStreamCSVConfig\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12\r\n\x05label\x18\x02 \x01(\t\x12\x0c\n\x04uuid\x18\x03 \x01(\x0c\"\x9d\x02\n\x11GenerateCSVParams\x12\x35\n\tqueryType\x18\x01 \x01(\x0e\x32\".v5api.GenerateCSVParams.QueryType\x12\x11\n\tstartTime\x18\x02 \x01(\x03\x12\x0f\n\x07\x65ndTime\x18\x03 \x01(\x03\x12\x12\n\nwindowSize\x18\x04 \x01(\x04\x12\r\n\x05\x64\x65pth\x18\x05 \x01(\r\x12\x17\n\x0fincludeVersions\x18\x06 \x01(\x08\x12\'\n\x07streams\x18\x07 \x03(\x0b\x32\x16.v5api.StreamCSVConfig\"H\n\tQueryType\x12\x19\n\x15\x41LIGNED_WINDOWS_QUERY\x10\x00\x12\x11\n\rWINDOWS_QUERY\x10\x01\x12\r\n\tRAW_QUERY\x10\x02\"Q\n\x13GenerateCSVResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x10\n\x08isHeader\x18\x02 \x01(\x08\x12\x0b\n\x03row\x18\x03 \x03(\t\"J\n\x0eSQLQueryParams\x12\r\n\x05query\x18\x01 \x01(\t\x12\x0e\n\x06params\x18\x02 \x03(\t\x12\x19\n\x04role\x18\x64 
\x01(\x0b\x32\x0b.v5api.Role\"D\n\x10SQLQueryResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x13\n\x0bSQLQueryRow\x18\x02 \x03(\x0c\"\x14\n\x04Role\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x94\x01\n\x19SetCompactionConfigParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x18\n\x10\x43ompactedVersion\x18\x02 \x01(\x04\x12>\n\x17reducedResolutionRanges\x18\x03 \x03(\x0b\x32\x1d.v5api.ReducedResolutionRange\x12\x0f\n\x07unused0\x18\x04 \x01(\x04\":\n\x1bSetCompactionConfigResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\")\n\x19GetCompactionConfigParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"\xc1\x01\n\x1bGetCompactionConfigResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x1a\n\x12LatestMajorVersion\x18\x02 \x01(\x04\x12\x18\n\x10\x43ompactedVersion\x18\x03 \x01(\x04\x12>\n\x17reducedResolutionRanges\x18\x04 \x03(\x0b\x32\x1d.v5api.ReducedResolutionRange\x12\x0f\n\x07unused0\x18\x05 \x01(\x04\"H\n\x16ReducedResolutionRange\x12\r\n\x05Start\x18\x01 \x01(\x03\x12\x0b\n\x03\x45nd\x18\x02 \x01(\x03\x12\x12\n\nResolution\x18\x03 \x01(\r\"K\n\x12SubscriptionUpdate\x12\'\n\x02op\x18\x01 \x01(\x0e\x32\x1b.v5api.SubscriptionUpdateOp\x12\x0c\n\x04uuid\x18\x02 \x03(\x0c\"Q\n\x10SubscriptionResp\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x0c\n\x04uuid\x18\x02 \x01(\x0c\x12\x12\n\narrowBytes\x18\x03 
\x01(\x0c*<\n\x0bMergePolicy\x12\t\n\x05NEVER\x10\x00\x12\t\n\x05\x45QUAL\x10\x01\x12\n\n\x06RETAIN\x10\x02\x12\x0b\n\x07REPLACE\x10\x03*7\n\x14SubscriptionUpdateOp\x12\r\n\tADD_UUIDS\x10\x00\x12\x10\n\x0cREMOVE_UUIDS\x10\x01\x32\xfd\r\n\x05\x42TrDB\x12?\n\tRawValues\x12\x16.v5api.RawValuesParams\x1a\x18.v5api.RawValuesResponse0\x01\x12N\n\x0e\x41rrowRawValues\x12\x1b.v5api.ArrowRawValuesParams\x1a\x1d.v5api.ArrowRawValuesResponse0\x01\x12T\n\x10\x41rrowMultiValues\x12\x1d.v5api.ArrowMultiValuesParams\x1a\x1f.v5api.ArrowMultiValuesResponse0\x01\x12N\n\x0e\x41lignedWindows\x12\x1b.v5api.AlignedWindowsParams\x1a\x1d.v5api.AlignedWindowsResponse0\x01\x12X\n\x13\x41rrowAlignedWindows\x12\x1b.v5api.AlignedWindowsParams\x1a\".v5api.ArrowAlignedWindowsResponse0\x01\x12\x39\n\x07Windows\x12\x14.v5api.WindowsParams\x1a\x16.v5api.WindowsResponse0\x01\x12\x43\n\x0c\x41rrowWindows\x12\x14.v5api.WindowsParams\x1a\x1b.v5api.ArrowWindowsResponse0\x01\x12@\n\nStreamInfo\x12\x17.v5api.StreamInfoParams\x1a\x19.v5api.StreamInfoResponse\x12^\n\x14SetStreamAnnotations\x12!.v5api.SetStreamAnnotationsParams\x1a#.v5api.SetStreamAnnotationsResponse\x12I\n\rSetStreamTags\x12\x1a.v5api.SetStreamTagsParams\x1a\x1c.v5api.SetStreamTagsResponse\x12\x34\n\x06\x43reate\x12\x13.v5api.CreateParams\x1a\x15.v5api.CreateResponse\x12Q\n\x0fListCollections\x12\x1c.v5api.ListCollectionsParams\x1a\x1e.v5api.ListCollectionsResponse0\x01\x12K\n\rLookupStreams\x12\x1a.v5api.LookupStreamsParams\x1a\x1c.v5api.LookupStreamsResponse0\x01\x12\x37\n\x07Nearest\x12\x14.v5api.NearestParams\x1a\x16.v5api.NearestResponse\x12\x39\n\x07\x43hanges\x12\x14.v5api.ChangesParams\x1a\x16.v5api.ChangesResponse0\x01\x12\x34\n\x06Insert\x12\x13.v5api.InsertParams\x1a\x15.v5api.InsertResponse\x12>\n\x0b\x41rrowInsert\x12\x18.v5api.ArrowInsertParams\x1a\x15.v5api.InsertResponse\x12\x34\n\x06\x44\x65lete\x12\x13.v5api.DeleteParams\x1a\x15.v5api.DeleteResponse\x12.\n\x04Info\x12\x11.v5api.InfoParams\x1a\x13.v5api.InfoResponse\x12\x43\
n\x0b\x46\x61ultInject\x12\x18.v5api.FaultInjectParams\x1a\x1a.v5api.FaultInjectResponse\x12\x31\n\x05\x46lush\x12\x12.v5api.FlushParams\x1a\x14.v5api.FlushResponse\x12@\n\nObliterate\x12\x17.v5api.ObliterateParams\x1a\x19.v5api.ObliterateResponse\x12L\n\x10GetMetadataUsage\x12\x1a.v5api.MetadataUsageParams\x1a\x1c.v5api.MetadataUsageResponse\x12\x45\n\x0bGenerateCSV\x12\x18.v5api.GenerateCSVParams\x1a\x1a.v5api.GenerateCSVResponse0\x01\x12<\n\x08SQLQuery\x12\x15.v5api.SQLQueryParams\x1a\x17.v5api.SQLQueryResponse0\x01\x12\x43\n\tSubscribe\x12\x19.v5api.SubscriptionUpdate\x1a\x17.v5api.SubscriptionResp(\x01\x30\x01\x62\x06proto3') -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x62trdb.proto\x12\x05v5api\"Q\n\x0fRawValuesParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\"}\n\x11RawValuesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x1f\n\x06values\x18\x04 \x03(\x0b\x32\x0f.v5api.RawPoint\"u\n\x16\x41rrowRawValuesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"n\n\x16\x41rrowMultiValuesParams\x12\x0c\n\x04uuid\x18\x01 \x03(\x0c\x12\x14\n\x0cversionMajor\x18\x02 \x03(\x04\x12\r\n\x05start\x18\x03 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x10\x12\x14\n\x0csnapPeriodNs\x18\x05 \x01(\x03\"K\n\x18\x41rrowMultiValuesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x12\n\narrowBytes\x18\x02 \x01(\x0c\"*\n\x0bRawPointVec\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\r\n\x05value\x18\x02 \x03(\x01\"j\n\x14\x41lignedWindowsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 
\x01(\x04\x12\x12\n\npointWidth\x18\x05 \x01(\r\"\x83\x01\n\x16\x41lignedWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12 \n\x06values\x18\x04 \x03(\x0b\x32\x10.v5api.StatPoint\"z\n\x1b\x41rrowAlignedWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"m\n\rWindowsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x04 \x01(\x04\x12\r\n\x05width\x18\x05 \x01(\x04\x12\r\n\x05\x64\x65pth\x18\x06 \x01(\r\"|\n\x0fWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12 \n\x06values\x18\x04 \x03(\x0b\x32\x10.v5api.StatPoint\"s\n\x14\x41rrowWindowsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x12\n\narrowBytes\x18\x04 \x01(\x0c\"h\n\x10StreamInfoParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x13\n\x0bomitVersion\x18\x02 \x01(\x08\x12\x16\n\x0eomitDescriptor\x18\x03 \x01(\x08\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"\x8a\x01\n\x12StreamInfoResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12+\n\ndescriptor\x18\x04 \x01(\x0b\x32\x17.v5api.StreamDescriptor\"\x98\x01\n\x10StreamDescriptor\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x12\n\ncollection\x18\x02 \x01(\t\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x17\n\x0fpropertyVersion\x18\x05 
\x01(\x04\"\x82\x01\n\x1aSetStreamAnnotationsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x1f\n\x17\x65xpectedPropertyVersion\x18\x02 \x01(\x04\x12#\n\x07\x63hanges\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x10\n\x08removals\x18\x04 \x03(\t\";\n\x1cSetStreamAnnotationsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"\x8a\x01\n\x13SetStreamTagsParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x1f\n\x17\x65xpectedPropertyVersion\x18\x02 \x01(\x04\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x12\n\ncollection\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 \x03(\t\"4\n\x15SetStreamTagsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"{\n\x0c\x43reateParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x12\n\ncollection\x18\x02 \x01(\t\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\"-\n\x0e\x43reateResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"@\n\x13MetadataUsageParams\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"y\n\x15MetadataUsageResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x1d\n\x04tags\x18\x02 \x03(\x0b\x32\x0f.v5api.KeyCount\x12$\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32\x0f.v5api.KeyCount\"&\n\x08KeyCount\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x04\"B\n\x15ListCollectionsParams\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"K\n\x17ListCollectionsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x13\n\x0b\x63ollections\x18\x02 \x03(\t\"\xab\x01\n\x13LookupStreamsParams\x12\x12\n\ncollection\x18\x01 \x01(\t\x12\x1a\n\x12isCollectionPrefix\x18\x02 \x01(\x08\x12 \n\x04tags\x18\x03 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\'\n\x0b\x61nnotations\x18\x04 \x03(\x0b\x32\x12.v5api.KeyOptValue\x12\x19\n\x04role\x18\x64 
\x01(\x0b\x32\x0b.v5api.Role\"^\n\x15LookupStreamsResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12(\n\x07results\x18\x02 \x03(\x0b\x32\x17.v5api.StreamDescriptor\"S\n\rNearestParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04time\x18\x02 \x01(\x10\x12\x14\n\x0cversionMajor\x18\x03 \x01(\x04\x12\x10\n\x08\x62\x61\x63kward\x18\x04 \x01(\x08\"z\n\x0fNearestResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12\x1e\n\x05value\x18\x04 \x01(\x0b\x32\x0f.v5api.RawPoint\"U\n\rChangesParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x11\n\tfromMajor\x18\x02 \x01(\x04\x12\x0f\n\x07toMajor\x18\x03 \x01(\x04\x12\x12\n\nresolution\x18\x04 \x01(\r\"\x7f\n\x0f\x43hangesResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\x12#\n\x06ranges\x18\x04 \x03(\x0b\x32\x13.v5api.ChangedRange\"#\n\tRoundSpec\x12\x0e\n\x04\x62its\x18\x02 \x01(\x05H\x00\x42\x06\n\x04spec\"\x99\x01\n\x0cInsertParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04sync\x18\x02 \x01(\x08\x12(\n\x0cmerge_policy\x18\x04 \x01(\x0e\x32\x12.v5api.MergePolicy\x12\"\n\x08rounding\x18\x05 \x01(\x0b\x32\x10.v5api.RoundSpec\x12\x1f\n\x06values\x18\x03 \x03(\x0b\x32\x0f.v5api.RawPoint\"\x91\x01\n\x11\x41rrowInsertParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x0c\n\x04sync\x18\x02 \x01(\x08\x12(\n\x0cmerge_policy\x18\x03 \x01(\x0e\x32\x12.v5api.MergePolicy\x12\"\n\x08rounding\x18\x04 \x01(\x0b\x32\x10.v5api.RoundSpec\x12\x12\n\narrowBytes\x18\x05 \x01(\x0c\"Y\n\x0eInsertResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\"8\n\x0c\x44\x65leteParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\r\n\x05start\x18\x02 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x03 
\x01(\x10\"Y\n\x0e\x44\x65leteResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\"\x0c\n\nInfoParams\"\xa2\x01\n\x0cInfoResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x19\n\x04mash\x18\x02 \x01(\x0b\x32\x0b.v5api.Mash\x12\x14\n\x0cmajorVersion\x18\x03 \x01(\r\x12\x14\n\x0cminorVersion\x18\x04 \x01(\r\x12\r\n\x05\x62uild\x18\x05 \x01(\t\x12\x1f\n\x05proxy\x18\x06 \x01(\x0b\x32\x10.v5api.ProxyInfo\"#\n\tProxyInfo\x12\x16\n\x0eproxyEndpoints\x18\x01 \x03(\t\"1\n\x11\x46\x61ultInjectParams\x12\x0c\n\x04type\x18\x01 \x01(\x04\x12\x0e\n\x06params\x18\x02 \x01(\x0c\">\n\x13\x46\x61ultInjectResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\n\n\x02rv\x18\x02 \x01(\x0c\"\x1b\n\x0b\x46lushParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"X\n\rFlushResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x14\n\x0cversionMajor\x18\x02 \x01(\x04\x12\x14\n\x0cversionMinor\x18\x03 \x01(\x04\" \n\x10ObliterateParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"1\n\x12ObliterateResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\"\'\n\x08RawPoint\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\r\n\x05value\x18\x02 \x01(\x01\"`\n\tStatPoint\x12\x0c\n\x04time\x18\x01 \x01(\x10\x12\x0b\n\x03min\x18\x02 \x01(\x01\x12\x0c\n\x04mean\x18\x03 \x01(\x01\x12\x0b\n\x03max\x18\x04 \x01(\x01\x12\r\n\x05\x63ount\x18\x05 \x01(\x06\x12\x0e\n\x06stddev\x18\x06 \x01(\x01\"*\n\x0c\x43hangedRange\x12\r\n\x05start\x18\x01 \x01(\x10\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x10\">\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12\x19\n\x04mash\x18\x03 \x01(\x0b\x32\x0b.v5api.Mash\"\x98\x01\n\x04Mash\x12\x10\n\x08revision\x18\x01 \x01(\x03\x12\x0e\n\x06leader\x18\x02 \x01(\t\x12\x16\n\x0eleaderRevision\x18\x03 \x01(\x03\x12\x13\n\x0btotalWeight\x18\x04 \x01(\x03\x12\x0f\n\x07healthy\x18\x05 \x01(\x08\x12\x10\n\x08unmapped\x18\x06 
\x01(\x01\x12\x1e\n\x07members\x18\x07 \x03(\x0b\x32\r.v5api.Member\"\xc3\x01\n\x06Member\x12\x0c\n\x04hash\x18\x01 \x01(\r\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\n\n\x02up\x18\x03 \x01(\x08\x12\n\n\x02in\x18\x04 \x01(\x08\x12\x0f\n\x07\x65nabled\x18\x05 \x01(\x08\x12\r\n\x05start\x18\x06 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x07 \x01(\x03\x12\x0e\n\x06weight\x18\x08 \x01(\x03\x12\x16\n\x0ereadPreference\x18\t \x01(\x01\x12\x15\n\rhttpEndpoints\x18\n \x01(\t\x12\x15\n\rgrpcEndpoints\x18\x0b \x01(\t\"8\n\x0bKeyOptValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x03val\x18\x02 \x01(\x0b\x32\x0f.v5api.OptValue\"\x19\n\x08OptValue\x12\r\n\x05value\x18\x01 \x01(\t\"&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"?\n\x0fStreamCSVConfig\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12\r\n\x05label\x18\x02 \x01(\t\x12\x0c\n\x04uuid\x18\x03 \x01(\x0c\"\x9d\x02\n\x11GenerateCSVParams\x12\x35\n\tqueryType\x18\x01 \x01(\x0e\x32\".v5api.GenerateCSVParams.QueryType\x12\x11\n\tstartTime\x18\x02 \x01(\x03\x12\x0f\n\x07\x65ndTime\x18\x03 \x01(\x03\x12\x12\n\nwindowSize\x18\x04 \x01(\x04\x12\r\n\x05\x64\x65pth\x18\x05 \x01(\r\x12\x17\n\x0fincludeVersions\x18\x06 \x01(\x08\x12\'\n\x07streams\x18\x07 \x03(\x0b\x32\x16.v5api.StreamCSVConfig\"H\n\tQueryType\x12\x19\n\x15\x41LIGNED_WINDOWS_QUERY\x10\x00\x12\x11\n\rWINDOWS_QUERY\x10\x01\x12\r\n\tRAW_QUERY\x10\x02\"Q\n\x13GenerateCSVResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x10\n\x08isHeader\x18\x02 \x01(\x08\x12\x0b\n\x03row\x18\x03 \x03(\t\"J\n\x0eSQLQueryParams\x12\r\n\x05query\x18\x01 \x01(\t\x12\x0e\n\x06params\x18\x02 \x03(\t\x12\x19\n\x04role\x18\x64 \x01(\x0b\x32\x0b.v5api.Role\"D\n\x10SQLQueryResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x13\n\x0bSQLQueryRow\x18\x02 \x03(\x0c\"\x14\n\x04Role\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x94\x01\n\x19SetCompactionConfigParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x18\n\x10\x43ompactedVersion\x18\x02 
\x01(\x04\x12>\n\x17reducedResolutionRanges\x18\x03 \x03(\x0b\x32\x1d.v5api.ReducedResolutionRange\x12\x0f\n\x07unused0\x18\x04 \x01(\x04\":\n\x1bSetCompactionConfigResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\")\n\x19GetCompactionConfigParams\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\"\xc1\x01\n\x1bGetCompactionConfigResponse\x12\x1b\n\x04stat\x18\x01 \x01(\x0b\x32\r.v5api.Status\x12\x1a\n\x12LatestMajorVersion\x18\x02 \x01(\x04\x12\x18\n\x10\x43ompactedVersion\x18\x03 \x01(\x04\x12>\n\x17reducedResolutionRanges\x18\x04 \x03(\x0b\x32\x1d.v5api.ReducedResolutionRange\x12\x0f\n\x07unused0\x18\x05 \x01(\x04\"H\n\x16ReducedResolutionRange\x12\r\n\x05Start\x18\x01 \x01(\x03\x12\x0b\n\x03\x45nd\x18\x02 \x01(\x03\x12\x12\n\nResolution\x18\x03 \x01(\r*<\n\x0bMergePolicy\x12\t\n\x05NEVER\x10\x00\x12\t\n\x05\x45QUAL\x10\x01\x12\n\n\x06RETAIN\x10\x02\x12\x0b\n\x07REPLACE\x10\x03\x32\xb3\r\n\x05\x42TrDB\x12?\n\tRawValues\x12\x16.v5api.RawValuesParams\x1a\x18.v5api.RawValuesResponse0\x01\x12I\n\x0e\x41rrowRawValues\x12\x16.v5api.RawValuesParams\x1a\x1d.v5api.ArrowRawValuesResponse0\x01\x12T\n\x10\x41rrowMultiValues\x12\x1d.v5api.ArrowMultiValuesParams\x1a\x1f.v5api.ArrowMultiValuesResponse0\x01\x12N\n\x0e\x41lignedWindows\x12\x1b.v5api.AlignedWindowsParams\x1a\x1d.v5api.AlignedWindowsResponse0\x01\x12X\n\x13\x41rrowAlignedWindows\x12\x1b.v5api.AlignedWindowsParams\x1a\".v5api.ArrowAlignedWindowsResponse0\x01\x12\x39\n\x07Windows\x12\x14.v5api.WindowsParams\x1a\x16.v5api.WindowsResponse0\x01\x12\x43\n\x0c\x41rrowWindows\x12\x14.v5api.WindowsParams\x1a\x1b.v5api.ArrowWindowsResponse0\x01\x12@\n\nStreamInfo\x12\x17.v5api.StreamInfoParams\x1a\x19.v5api.StreamInfoResponse\x12^\n\x14SetStreamAnnotations\x12!.v5api.SetStreamAnnotationsParams\x1a#.v5api.SetStreamAnnotationsResponse\x12I\n\rSetStreamTags\x12\x1a.v5api.SetStreamTagsParams\x1a\x1c.v5api.SetStreamTagsResponse\x12\x34\n\x06\x43reate\x12\x13.v5api.CreateParams\x1a\x15.v5api.CreateResponse\x12Q\n\x0fListCollectio
ns\x12\x1c.v5api.ListCollectionsParams\x1a\x1e.v5api.ListCollectionsResponse0\x01\x12K\n\rLookupStreams\x12\x1a.v5api.LookupStreamsParams\x1a\x1c.v5api.LookupStreamsResponse0\x01\x12\x37\n\x07Nearest\x12\x14.v5api.NearestParams\x1a\x16.v5api.NearestResponse\x12\x39\n\x07\x43hanges\x12\x14.v5api.ChangesParams\x1a\x16.v5api.ChangesResponse0\x01\x12\x34\n\x06Insert\x12\x13.v5api.InsertParams\x1a\x15.v5api.InsertResponse\x12>\n\x0b\x41rrowInsert\x12\x18.v5api.ArrowInsertParams\x1a\x15.v5api.InsertResponse\x12\x34\n\x06\x44\x65lete\x12\x13.v5api.DeleteParams\x1a\x15.v5api.DeleteResponse\x12.\n\x04Info\x12\x11.v5api.InfoParams\x1a\x13.v5api.InfoResponse\x12\x43\n\x0b\x46\x61ultInject\x12\x18.v5api.FaultInjectParams\x1a\x1a.v5api.FaultInjectResponse\x12\x31\n\x05\x46lush\x12\x12.v5api.FlushParams\x1a\x14.v5api.FlushResponse\x12@\n\nObliterate\x12\x17.v5api.ObliterateParams\x1a\x19.v5api.ObliterateResponse\x12L\n\x10GetMetadataUsage\x12\x1a.v5api.MetadataUsageParams\x1a\x1c.v5api.MetadataUsageResponse\x12\x45\n\x0bGenerateCSV\x12\x18.v5api.GenerateCSVParams\x1a\x1a.v5api.GenerateCSVResponse0\x01\x12<\n\x08SQLQuery\x12\x15.v5api.SQLQueryParams\x1a\x17.v5api.SQLQueryResponse0\x01\x62\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'btrdb_pb2', globals()) +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'btrdb_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _MERGEPOLICY._serialized_start=6305 - _MERGEPOLICY._serialized_end=6365 - _RAWVALUESPARAMS._serialized_start=22 - _RAWVALUESPARAMS._serialized_end=103 - _RAWVALUESRESPONSE._serialized_start=105 - _RAWVALUESRESPONSE._serialized_end=230 - _ARROWRAWVALUESRESPONSE._serialized_start=232 - _ARROWRAWVALUESRESPONSE._serialized_end=349 - _ARROWMULTIVALUESPARAMS._serialized_start=351 - 
_ARROWMULTIVALUESPARAMS._serialized_end=461 - _ARROWMULTIVALUESRESPONSE._serialized_start=463 - _ARROWMULTIVALUESRESPONSE._serialized_end=538 - _RAWPOINTVEC._serialized_start=540 - _RAWPOINTVEC._serialized_end=582 - _ALIGNEDWINDOWSPARAMS._serialized_start=584 - _ALIGNEDWINDOWSPARAMS._serialized_end=690 - _ALIGNEDWINDOWSRESPONSE._serialized_start=693 - _ALIGNEDWINDOWSRESPONSE._serialized_end=824 - _ARROWALIGNEDWINDOWSRESPONSE._serialized_start=826 - _ARROWALIGNEDWINDOWSRESPONSE._serialized_end=948 - _WINDOWSPARAMS._serialized_start=950 - _WINDOWSPARAMS._serialized_end=1059 - _WINDOWSRESPONSE._serialized_start=1061 - _WINDOWSRESPONSE._serialized_end=1185 - _ARROWWINDOWSRESPONSE._serialized_start=1187 - _ARROWWINDOWSRESPONSE._serialized_end=1302 - _STREAMINFOPARAMS._serialized_start=1304 - _STREAMINFOPARAMS._serialized_end=1408 - _STREAMINFORESPONSE._serialized_start=1411 - _STREAMINFORESPONSE._serialized_end=1549 - _STREAMDESCRIPTOR._serialized_start=1552 - _STREAMDESCRIPTOR._serialized_end=1704 - _SETSTREAMANNOTATIONSPARAMS._serialized_start=1707 - _SETSTREAMANNOTATIONSPARAMS._serialized_end=1837 - _SETSTREAMANNOTATIONSRESPONSE._serialized_start=1839 - _SETSTREAMANNOTATIONSRESPONSE._serialized_end=1898 - _SETSTREAMTAGSPARAMS._serialized_start=1901 - _SETSTREAMTAGSPARAMS._serialized_end=2039 - _SETSTREAMTAGSRESPONSE._serialized_start=2041 - _SETSTREAMTAGSRESPONSE._serialized_end=2093 - _CREATEPARAMS._serialized_start=2095 - _CREATEPARAMS._serialized_end=2218 - _CREATERESPONSE._serialized_start=2220 - _CREATERESPONSE._serialized_end=2265 - _METADATAUSAGEPARAMS._serialized_start=2267 - _METADATAUSAGEPARAMS._serialized_end=2331 - _METADATAUSAGERESPONSE._serialized_start=2333 - _METADATAUSAGERESPONSE._serialized_end=2454 - _KEYCOUNT._serialized_start=2456 - _KEYCOUNT._serialized_end=2494 - _LISTCOLLECTIONSPARAMS._serialized_start=2496 - _LISTCOLLECTIONSPARAMS._serialized_end=2562 - _LISTCOLLECTIONSRESPONSE._serialized_start=2564 - 
_LISTCOLLECTIONSRESPONSE._serialized_end=2639 - _LOOKUPSTREAMSPARAMS._serialized_start=2642 - _LOOKUPSTREAMSPARAMS._serialized_end=2813 - _LOOKUPSTREAMSRESPONSE._serialized_start=2815 - _LOOKUPSTREAMSRESPONSE._serialized_end=2909 - _NEARESTPARAMS._serialized_start=2911 - _NEARESTPARAMS._serialized_end=2994 - _NEARESTRESPONSE._serialized_start=2996 - _NEARESTRESPONSE._serialized_end=3118 - _CHANGESPARAMS._serialized_start=3120 - _CHANGESPARAMS._serialized_end=3205 - _CHANGESRESPONSE._serialized_start=3207 - _CHANGESRESPONSE._serialized_end=3334 - _ROUNDSPEC._serialized_start=3336 - _ROUNDSPEC._serialized_end=3371 - _INSERTPARAMS._serialized_start=3374 - _INSERTPARAMS._serialized_end=3527 - _ARROWINSERTPARAMS._serialized_start=3530 - _ARROWINSERTPARAMS._serialized_end=3675 - _INSERTRESPONSE._serialized_start=3677 - _INSERTRESPONSE._serialized_end=3766 - _DELETEPARAMS._serialized_start=3768 - _DELETEPARAMS._serialized_end=3824 - _DELETERESPONSE._serialized_start=3826 - _DELETERESPONSE._serialized_end=3915 - _INFOPARAMS._serialized_start=3917 - _INFOPARAMS._serialized_end=3929 - _INFORESPONSE._serialized_start=3932 - _INFORESPONSE._serialized_end=4094 - _PROXYINFO._serialized_start=4096 - _PROXYINFO._serialized_end=4131 - _FAULTINJECTPARAMS._serialized_start=4133 - _FAULTINJECTPARAMS._serialized_end=4182 - _FAULTINJECTRESPONSE._serialized_start=4184 - _FAULTINJECTRESPONSE._serialized_end=4246 - _FLUSHPARAMS._serialized_start=4248 - _FLUSHPARAMS._serialized_end=4275 - _FLUSHRESPONSE._serialized_start=4277 - _FLUSHRESPONSE._serialized_end=4365 - _OBLITERATEPARAMS._serialized_start=4367 - _OBLITERATEPARAMS._serialized_end=4399 - _OBLITERATERESPONSE._serialized_start=4401 - _OBLITERATERESPONSE._serialized_end=4450 - _RAWPOINT._serialized_start=4452 - _RAWPOINT._serialized_end=4491 - _STATPOINT._serialized_start=4493 - _STATPOINT._serialized_end=4589 - _CHANGEDRANGE._serialized_start=4591 - _CHANGEDRANGE._serialized_end=4633 - _STATUS._serialized_start=4635 - 
_STATUS._serialized_end=4697 - _MASH._serialized_start=4700 - _MASH._serialized_end=4852 - _MEMBER._serialized_start=4855 - _MEMBER._serialized_end=5050 - _KEYOPTVALUE._serialized_start=5052 - _KEYOPTVALUE._serialized_end=5108 - _OPTVALUE._serialized_start=5110 - _OPTVALUE._serialized_end=5135 - _KEYVALUE._serialized_start=5137 - _KEYVALUE._serialized_end=5175 - _STREAMCSVCONFIG._serialized_start=5177 - _STREAMCSVCONFIG._serialized_end=5240 - _GENERATECSVPARAMS._serialized_start=5243 - _GENERATECSVPARAMS._serialized_end=5528 - _GENERATECSVPARAMS_QUERYTYPE._serialized_start=5456 - _GENERATECSVPARAMS_QUERYTYPE._serialized_end=5528 - _GENERATECSVRESPONSE._serialized_start=5530 - _GENERATECSVRESPONSE._serialized_end=5611 - _SQLQUERYPARAMS._serialized_start=5613 - _SQLQUERYPARAMS._serialized_end=5687 - _SQLQUERYRESPONSE._serialized_start=5689 - _SQLQUERYRESPONSE._serialized_end=5757 - _ROLE._serialized_start=5759 - _ROLE._serialized_end=5779 - _SETCOMPACTIONCONFIGPARAMS._serialized_start=5782 - _SETCOMPACTIONCONFIGPARAMS._serialized_end=5930 - _SETCOMPACTIONCONFIGRESPONSE._serialized_start=5932 - _SETCOMPACTIONCONFIGRESPONSE._serialized_end=5990 - _GETCOMPACTIONCONFIGPARAMS._serialized_start=5992 - _GETCOMPACTIONCONFIGPARAMS._serialized_end=6033 - _GETCOMPACTIONCONFIGRESPONSE._serialized_start=6036 - _GETCOMPACTIONCONFIGRESPONSE._serialized_end=6229 - _REDUCEDRESOLUTIONRANGE._serialized_start=6231 - _REDUCEDRESOLUTIONRANGE._serialized_end=6303 - _BTRDB._serialized_start=6368 - _BTRDB._serialized_end=8083 + _globals['_MERGEPOLICY']._serialized_start=6600 + _globals['_MERGEPOLICY']._serialized_end=6660 + _globals['_SUBSCRIPTIONUPDATEOP']._serialized_start=6662 + _globals['_SUBSCRIPTIONUPDATEOP']._serialized_end=6717 + _globals['_RAWVALUESPARAMS']._serialized_start=22 + _globals['_RAWVALUESPARAMS']._serialized_end=103 + _globals['_RAWVALUESRESPONSE']._serialized_start=105 + _globals['_RAWVALUESRESPONSE']._serialized_end=230 + 
_globals['_ARROWRAWVALUESPARAMS']._serialized_start=232 + _globals['_ARROWRAWVALUESPARAMS']._serialized_end=341 + _globals['_ARROWRAWVALUESRESPONSE']._serialized_start=343 + _globals['_ARROWRAWVALUESRESPONSE']._serialized_end=460 + _globals['_ARROWMULTIVALUESPARAMS']._serialized_start=463 + _globals['_ARROWMULTIVALUESPARAMS']._serialized_end=596 + _globals['_ARROWMULTIVALUESRESPONSE']._serialized_start=598 + _globals['_ARROWMULTIVALUESRESPONSE']._serialized_end=673 + _globals['_RAWPOINTVEC']._serialized_start=675 + _globals['_RAWPOINTVEC']._serialized_end=717 + _globals['_ALIGNEDWINDOWSPARAMS']._serialized_start=719 + _globals['_ALIGNEDWINDOWSPARAMS']._serialized_end=825 + _globals['_ALIGNEDWINDOWSRESPONSE']._serialized_start=828 + _globals['_ALIGNEDWINDOWSRESPONSE']._serialized_end=959 + _globals['_ARROWALIGNEDWINDOWSRESPONSE']._serialized_start=961 + _globals['_ARROWALIGNEDWINDOWSRESPONSE']._serialized_end=1083 + _globals['_WINDOWSPARAMS']._serialized_start=1085 + _globals['_WINDOWSPARAMS']._serialized_end=1194 + _globals['_WINDOWSRESPONSE']._serialized_start=1196 + _globals['_WINDOWSRESPONSE']._serialized_end=1320 + _globals['_ARROWWINDOWSRESPONSE']._serialized_start=1322 + _globals['_ARROWWINDOWSRESPONSE']._serialized_end=1437 + _globals['_STREAMINFOPARAMS']._serialized_start=1439 + _globals['_STREAMINFOPARAMS']._serialized_end=1543 + _globals['_STREAMINFORESPONSE']._serialized_start=1546 + _globals['_STREAMINFORESPONSE']._serialized_end=1684 + _globals['_STREAMDESCRIPTOR']._serialized_start=1687 + _globals['_STREAMDESCRIPTOR']._serialized_end=1839 + _globals['_SETSTREAMANNOTATIONSPARAMS']._serialized_start=1842 + _globals['_SETSTREAMANNOTATIONSPARAMS']._serialized_end=1972 + _globals['_SETSTREAMANNOTATIONSRESPONSE']._serialized_start=1974 + _globals['_SETSTREAMANNOTATIONSRESPONSE']._serialized_end=2033 + _globals['_SETSTREAMTAGSPARAMS']._serialized_start=2036 + _globals['_SETSTREAMTAGSPARAMS']._serialized_end=2174 + 
_globals['_SETSTREAMTAGSRESPONSE']._serialized_start=2176 + _globals['_SETSTREAMTAGSRESPONSE']._serialized_end=2228 + _globals['_CREATEPARAMS']._serialized_start=2230 + _globals['_CREATEPARAMS']._serialized_end=2353 + _globals['_CREATERESPONSE']._serialized_start=2355 + _globals['_CREATERESPONSE']._serialized_end=2400 + _globals['_METADATAUSAGEPARAMS']._serialized_start=2402 + _globals['_METADATAUSAGEPARAMS']._serialized_end=2466 + _globals['_METADATAUSAGERESPONSE']._serialized_start=2468 + _globals['_METADATAUSAGERESPONSE']._serialized_end=2589 + _globals['_KEYCOUNT']._serialized_start=2591 + _globals['_KEYCOUNT']._serialized_end=2629 + _globals['_LISTCOLLECTIONSPARAMS']._serialized_start=2631 + _globals['_LISTCOLLECTIONSPARAMS']._serialized_end=2697 + _globals['_LISTCOLLECTIONSRESPONSE']._serialized_start=2699 + _globals['_LISTCOLLECTIONSRESPONSE']._serialized_end=2774 + _globals['_LOOKUPSTREAMSPARAMS']._serialized_start=2777 + _globals['_LOOKUPSTREAMSPARAMS']._serialized_end=2948 + _globals['_LOOKUPSTREAMSRESPONSE']._serialized_start=2950 + _globals['_LOOKUPSTREAMSRESPONSE']._serialized_end=3044 + _globals['_NEARESTPARAMS']._serialized_start=3046 + _globals['_NEARESTPARAMS']._serialized_end=3129 + _globals['_NEARESTRESPONSE']._serialized_start=3131 + _globals['_NEARESTRESPONSE']._serialized_end=3253 + _globals['_CHANGESPARAMS']._serialized_start=3255 + _globals['_CHANGESPARAMS']._serialized_end=3340 + _globals['_CHANGESRESPONSE']._serialized_start=3342 + _globals['_CHANGESRESPONSE']._serialized_end=3469 + _globals['_ROUNDSPEC']._serialized_start=3471 + _globals['_ROUNDSPEC']._serialized_end=3506 + _globals['_INSERTPARAMS']._serialized_start=3509 + _globals['_INSERTPARAMS']._serialized_end=3662 + _globals['_ARROWINSERTPARAMS']._serialized_start=3665 + _globals['_ARROWINSERTPARAMS']._serialized_end=3810 + _globals['_INSERTRESPONSE']._serialized_start=3812 + _globals['_INSERTRESPONSE']._serialized_end=3901 + _globals['_DELETEPARAMS']._serialized_start=3903 + 
_globals['_DELETEPARAMS']._serialized_end=3959 + _globals['_DELETERESPONSE']._serialized_start=3961 + _globals['_DELETERESPONSE']._serialized_end=4050 + _globals['_INFOPARAMS']._serialized_start=4052 + _globals['_INFOPARAMS']._serialized_end=4064 + _globals['_INFORESPONSE']._serialized_start=4067 + _globals['_INFORESPONSE']._serialized_end=4229 + _globals['_PROXYINFO']._serialized_start=4231 + _globals['_PROXYINFO']._serialized_end=4266 + _globals['_FAULTINJECTPARAMS']._serialized_start=4268 + _globals['_FAULTINJECTPARAMS']._serialized_end=4317 + _globals['_FAULTINJECTRESPONSE']._serialized_start=4319 + _globals['_FAULTINJECTRESPONSE']._serialized_end=4381 + _globals['_FLUSHPARAMS']._serialized_start=4383 + _globals['_FLUSHPARAMS']._serialized_end=4410 + _globals['_FLUSHRESPONSE']._serialized_start=4412 + _globals['_FLUSHRESPONSE']._serialized_end=4500 + _globals['_OBLITERATEPARAMS']._serialized_start=4502 + _globals['_OBLITERATEPARAMS']._serialized_end=4534 + _globals['_OBLITERATERESPONSE']._serialized_start=4536 + _globals['_OBLITERATERESPONSE']._serialized_end=4585 + _globals['_RAWPOINT']._serialized_start=4587 + _globals['_RAWPOINT']._serialized_end=4626 + _globals['_STATPOINT']._serialized_start=4628 + _globals['_STATPOINT']._serialized_end=4724 + _globals['_CHANGEDRANGE']._serialized_start=4726 + _globals['_CHANGEDRANGE']._serialized_end=4768 + _globals['_STATUS']._serialized_start=4770 + _globals['_STATUS']._serialized_end=4832 + _globals['_MASH']._serialized_start=4835 + _globals['_MASH']._serialized_end=4987 + _globals['_MEMBER']._serialized_start=4990 + _globals['_MEMBER']._serialized_end=5185 + _globals['_KEYOPTVALUE']._serialized_start=5187 + _globals['_KEYOPTVALUE']._serialized_end=5243 + _globals['_OPTVALUE']._serialized_start=5245 + _globals['_OPTVALUE']._serialized_end=5270 + _globals['_KEYVALUE']._serialized_start=5272 + _globals['_KEYVALUE']._serialized_end=5310 + _globals['_STREAMCSVCONFIG']._serialized_start=5312 + 
_globals['_STREAMCSVCONFIG']._serialized_end=5375 + _globals['_GENERATECSVPARAMS']._serialized_start=5378 + _globals['_GENERATECSVPARAMS']._serialized_end=5663 + _globals['_GENERATECSVPARAMS_QUERYTYPE']._serialized_start=5591 + _globals['_GENERATECSVPARAMS_QUERYTYPE']._serialized_end=5663 + _globals['_GENERATECSVRESPONSE']._serialized_start=5665 + _globals['_GENERATECSVRESPONSE']._serialized_end=5746 + _globals['_SQLQUERYPARAMS']._serialized_start=5748 + _globals['_SQLQUERYPARAMS']._serialized_end=5822 + _globals['_SQLQUERYRESPONSE']._serialized_start=5824 + _globals['_SQLQUERYRESPONSE']._serialized_end=5892 + _globals['_ROLE']._serialized_start=5894 + _globals['_ROLE']._serialized_end=5914 + _globals['_SETCOMPACTIONCONFIGPARAMS']._serialized_start=5917 + _globals['_SETCOMPACTIONCONFIGPARAMS']._serialized_end=6065 + _globals['_SETCOMPACTIONCONFIGRESPONSE']._serialized_start=6067 + _globals['_SETCOMPACTIONCONFIGRESPONSE']._serialized_end=6125 + _globals['_GETCOMPACTIONCONFIGPARAMS']._serialized_start=6127 + _globals['_GETCOMPACTIONCONFIGPARAMS']._serialized_end=6168 + _globals['_GETCOMPACTIONCONFIGRESPONSE']._serialized_start=6171 + _globals['_GETCOMPACTIONCONFIGRESPONSE']._serialized_end=6364 + _globals['_REDUCEDRESOLUTIONRANGE']._serialized_start=6366 + _globals['_REDUCEDRESOLUTIONRANGE']._serialized_end=6438 + _globals['_SUBSCRIPTIONUPDATE']._serialized_start=6440 + _globals['_SUBSCRIPTIONUPDATE']._serialized_end=6515 + _globals['_SUBSCRIPTIONRESP']._serialized_start=6517 + _globals['_SUBSCRIPTIONRESP']._serialized_end=6598 + _globals['_BTRDB']._serialized_start=6720 + _globals['_BTRDB']._serialized_end=8509 # @@protoc_insertion_point(module_scope) diff --git a/btrdb/grpcinterface/btrdb_pb2.pyi b/btrdb/grpcinterface/btrdb_pb2.pyi index ff9b6fc..cc6320c 100644 --- a/btrdb/grpcinterface/btrdb_pb2.pyi +++ b/btrdb/grpcinterface/btrdb_pb2.pyi @@ -5,334 +5,307 @@ from google.protobuf import message as _message from typing import ClassVar as _ClassVar, Iterable as 
_Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor -EQUAL: MergePolicy + +class MergePolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + NEVER: _ClassVar[MergePolicy] + EQUAL: _ClassVar[MergePolicy] + RETAIN: _ClassVar[MergePolicy] + REPLACE: _ClassVar[MergePolicy] + +class SubscriptionUpdateOp(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + ADD_UUIDS: _ClassVar[SubscriptionUpdateOp] + REMOVE_UUIDS: _ClassVar[SubscriptionUpdateOp] NEVER: MergePolicy -REPLACE: MergePolicy +EQUAL: MergePolicy RETAIN: MergePolicy +REPLACE: MergePolicy +ADD_UUIDS: SubscriptionUpdateOp +REMOVE_UUIDS: SubscriptionUpdateOp -class AlignedWindowsParams(_message.Message): - __slots__ = ["end", "pointWidth", "start", "uuid", "versionMajor"] - END_FIELD_NUMBER: _ClassVar[int] - POINTWIDTH_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] +class RawValuesParams(_message.Message): + __slots__ = ("uuid", "start", "end", "versionMajor") UUID_FIELD_NUMBER: _ClassVar[int] + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - end: int - pointWidth: int - start: int uuid: bytes + start: int + end: int versionMajor: int - def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ..., pointWidth: _Optional[int] = ...) -> None: ... + def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ...) -> None: ... 
-class AlignedWindowsResponse(_message.Message): - __slots__ = ["stat", "values", "versionMajor", "versionMinor"] +class RawValuesResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "values") STAT_FIELD_NUMBER: _ClassVar[int] - VALUES_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + VALUES_FIELD_NUMBER: _ClassVar[int] stat: Status - values: _containers.RepeatedCompositeFieldContainer[StatPoint] versionMajor: int versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[StatPoint, _Mapping]]] = ...) -> None: ... + values: _containers.RepeatedCompositeFieldContainer[RawPoint] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[RawPoint, _Mapping]]] = ...) -> None: ... -class ArrowAlignedWindowsResponse(_message.Message): - __slots__ = ["arrowBytes", "stat", "versionMajor", "versionMinor"] - ARROWBYTES_FIELD_NUMBER: _ClassVar[int] +class ArrowRawValuesParams(_message.Message): + __slots__ = ("uuid", "start", "end", "versionMajor", "templateBytes") + UUID_FIELD_NUMBER: _ClassVar[int] + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + TEMPLATEBYTES_FIELD_NUMBER: _ClassVar[int] + uuid: bytes + start: int + end: int + versionMajor: int + templateBytes: bytes + def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ..., templateBytes: _Optional[bytes] = ...) -> None: ... 
+ +class ArrowRawValuesResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "arrowBytes") STAT_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - arrowBytes: bytes + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] stat: Status versionMajor: int versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... - -class ArrowInsertParams(_message.Message): - __slots__ = ["arrowBytes", "merge_policy", "rounding", "sync", "uuid"] - ARROWBYTES_FIELD_NUMBER: _ClassVar[int] - MERGE_POLICY_FIELD_NUMBER: _ClassVar[int] - ROUNDING_FIELD_NUMBER: _ClassVar[int] - SYNC_FIELD_NUMBER: _ClassVar[int] - UUID_FIELD_NUMBER: _ClassVar[int] arrowBytes: bytes - merge_policy: MergePolicy - rounding: RoundSpec - sync: bool - uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., sync: bool = ..., merge_policy: _Optional[_Union[MergePolicy, str]] = ..., rounding: _Optional[_Union[RoundSpec, _Mapping]] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... 
class ArrowMultiValuesParams(_message.Message): - __slots__ = ["end", "snapPeriodNs", "start", "uuid", "versionMajor"] - END_FIELD_NUMBER: _ClassVar[int] - SNAPPERIODNS_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("uuid", "versionMajor", "start", "end", "snapPeriodNs", "templateBytes") UUID_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - end: int - snapPeriodNs: int - start: int + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + SNAPPERIODNS_FIELD_NUMBER: _ClassVar[int] + TEMPLATEBYTES_FIELD_NUMBER: _ClassVar[int] uuid: _containers.RepeatedScalarFieldContainer[bytes] versionMajor: _containers.RepeatedScalarFieldContainer[int] - def __init__(self, uuid: _Optional[_Iterable[bytes]] = ..., versionMajor: _Optional[_Iterable[int]] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., snapPeriodNs: _Optional[int] = ...) -> None: ... + start: int + end: int + snapPeriodNs: int + templateBytes: bytes + def __init__(self, uuid: _Optional[_Iterable[bytes]] = ..., versionMajor: _Optional[_Iterable[int]] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., snapPeriodNs: _Optional[int] = ..., templateBytes: _Optional[bytes] = ...) -> None: ... class ArrowMultiValuesResponse(_message.Message): - __slots__ = ["arrowBytes", "stat"] - ARROWBYTES_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("stat", "arrowBytes") STAT_FIELD_NUMBER: _ClassVar[int] - arrowBytes: bytes + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] stat: Status + arrowBytes: bytes def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... 
-class ArrowRawValuesResponse(_message.Message): - __slots__ = ["arrowBytes", "stat", "versionMajor", "versionMinor"] - ARROWBYTES_FIELD_NUMBER: _ClassVar[int] +class RawPointVec(_message.Message): + __slots__ = ("time", "value") + TIME_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + time: int + value: _containers.RepeatedScalarFieldContainer[float] + def __init__(self, time: _Optional[int] = ..., value: _Optional[_Iterable[float]] = ...) -> None: ... + +class AlignedWindowsParams(_message.Message): + __slots__ = ("uuid", "start", "end", "versionMajor", "pointWidth") + UUID_FIELD_NUMBER: _ClassVar[int] + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + POINTWIDTH_FIELD_NUMBER: _ClassVar[int] + uuid: bytes + start: int + end: int + versionMajor: int + pointWidth: int + def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ..., pointWidth: _Optional[int] = ...) -> None: ... + +class AlignedWindowsResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "values") STAT_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - arrowBytes: bytes + VALUES_FIELD_NUMBER: _ClassVar[int] stat: Status versionMajor: int versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... + values: _containers.RepeatedCompositeFieldContainer[StatPoint] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[StatPoint, _Mapping]]] = ...) -> None: ... 
-class ArrowWindowsResponse(_message.Message): - __slots__ = ["arrowBytes", "stat", "versionMajor", "versionMinor"] - ARROWBYTES_FIELD_NUMBER: _ClassVar[int] +class ArrowAlignedWindowsResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "arrowBytes") STAT_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - arrowBytes: bytes + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] stat: Status versionMajor: int versionMinor: int + arrowBytes: bytes def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... -class ChangedRange(_message.Message): - __slots__ = ["end", "start"] - END_FIELD_NUMBER: _ClassVar[int] +class WindowsParams(_message.Message): + __slots__ = ("uuid", "start", "end", "versionMajor", "width", "depth") + UUID_FIELD_NUMBER: _ClassVar[int] START_FIELD_NUMBER: _ClassVar[int] - end: int + END_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + WIDTH_FIELD_NUMBER: _ClassVar[int] + DEPTH_FIELD_NUMBER: _ClassVar[int] + uuid: bytes start: int - def __init__(self, start: _Optional[int] = ..., end: _Optional[int] = ...) -> None: ... + end: int + versionMajor: int + width: int + depth: int + def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ..., width: _Optional[int] = ..., depth: _Optional[int] = ...) -> None: ... 
-class ChangesParams(_message.Message): - __slots__ = ["fromMajor", "resolution", "toMajor", "uuid"] - FROMMAJOR_FIELD_NUMBER: _ClassVar[int] - RESOLUTION_FIELD_NUMBER: _ClassVar[int] - TOMAJOR_FIELD_NUMBER: _ClassVar[int] +class WindowsResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "values") + STAT_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + VALUES_FIELD_NUMBER: _ClassVar[int] + stat: Status + versionMajor: int + versionMinor: int + values: _containers.RepeatedCompositeFieldContainer[StatPoint] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[StatPoint, _Mapping]]] = ...) -> None: ... + +class ArrowWindowsResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "arrowBytes") + STAT_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] + stat: Status + versionMajor: int + versionMinor: int + arrowBytes: bytes + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... + +class StreamInfoParams(_message.Message): + __slots__ = ("uuid", "omitVersion", "omitDescriptor", "role") UUID_FIELD_NUMBER: _ClassVar[int] - fromMajor: int - resolution: int - toMajor: int + OMITVERSION_FIELD_NUMBER: _ClassVar[int] + OMITDESCRIPTOR_FIELD_NUMBER: _ClassVar[int] + ROLE_FIELD_NUMBER: _ClassVar[int] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., fromMajor: _Optional[int] = ..., toMajor: _Optional[int] = ..., resolution: _Optional[int] = ...) -> None: ... 
+ omitVersion: bool + omitDescriptor: bool + role: Role + def __init__(self, uuid: _Optional[bytes] = ..., omitVersion: bool = ..., omitDescriptor: bool = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... -class ChangesResponse(_message.Message): - __slots__ = ["ranges", "stat", "versionMajor", "versionMinor"] - RANGES_FIELD_NUMBER: _ClassVar[int] +class StreamInfoResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "descriptor") STAT_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - ranges: _containers.RepeatedCompositeFieldContainer[ChangedRange] + DESCRIPTOR_FIELD_NUMBER: _ClassVar[int] stat: Status versionMajor: int versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., ranges: _Optional[_Iterable[_Union[ChangedRange, _Mapping]]] = ...) -> None: ... + descriptor: StreamDescriptor + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., descriptor: _Optional[_Union[StreamDescriptor, _Mapping]] = ...) -> None: ... 
-class CreateParams(_message.Message): - __slots__ = ["annotations", "collection", "tags", "uuid"] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] +class StreamDescriptor(_message.Message): + __slots__ = ("uuid", "collection", "tags", "annotations", "propertyVersion") + UUID_FIELD_NUMBER: _ClassVar[int] COLLECTION_FIELD_NUMBER: _ClassVar[int] TAGS_FIELD_NUMBER: _ClassVar[int] - UUID_FIELD_NUMBER: _ClassVar[int] - annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + PROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] + uuid: bytes collection: str tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + propertyVersion: int + def __init__(self, uuid: _Optional[bytes] = ..., collection: _Optional[str] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., propertyVersion: _Optional[int] = ...) -> None: ... + +class SetStreamAnnotationsParams(_message.Message): + __slots__ = ("uuid", "expectedPropertyVersion", "changes", "removals") + UUID_FIELD_NUMBER: _ClassVar[int] + EXPECTEDPROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] + CHANGES_FIELD_NUMBER: _ClassVar[int] + REMOVALS_FIELD_NUMBER: _ClassVar[int] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., collection: _Optional[str] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ...) -> None: ... + expectedPropertyVersion: int + changes: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + removals: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuid: _Optional[bytes] = ..., expectedPropertyVersion: _Optional[int] = ..., changes: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., removals: _Optional[_Iterable[str]] = ...) -> None: ... 
-class CreateResponse(_message.Message): - __slots__ = ["stat"] +class SetStreamAnnotationsResponse(_message.Message): + __slots__ = ("stat",) STAT_FIELD_NUMBER: _ClassVar[int] stat: Status def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... -class DeleteParams(_message.Message): - __slots__ = ["end", "start", "uuid"] - END_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] +class SetStreamTagsParams(_message.Message): + __slots__ = ("uuid", "expectedPropertyVersion", "tags", "collection", "remove") UUID_FIELD_NUMBER: _ClassVar[int] - end: int - start: int + EXPECTEDPROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + COLLECTION_FIELD_NUMBER: _ClassVar[int] + REMOVE_FIELD_NUMBER: _ClassVar[int] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ...) -> None: ... + expectedPropertyVersion: int + tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + collection: str + remove: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, uuid: _Optional[bytes] = ..., expectedPropertyVersion: _Optional[int] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., collection: _Optional[str] = ..., remove: _Optional[_Iterable[str]] = ...) -> None: ... -class DeleteResponse(_message.Message): - __slots__ = ["stat", "versionMajor", "versionMinor"] +class SetStreamTagsResponse(_message.Message): + __slots__ = ("stat",) STAT_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] stat: Status - versionMajor: int - versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... 
-class FaultInjectParams(_message.Message): - __slots__ = ["params", "type"] - PARAMS_FIELD_NUMBER: _ClassVar[int] - TYPE_FIELD_NUMBER: _ClassVar[int] - params: bytes - type: int - def __init__(self, type: _Optional[int] = ..., params: _Optional[bytes] = ...) -> None: ... +class CreateParams(_message.Message): + __slots__ = ("uuid", "collection", "tags", "annotations") + UUID_FIELD_NUMBER: _ClassVar[int] + COLLECTION_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + uuid: bytes + collection: str + tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + def __init__(self, uuid: _Optional[bytes] = ..., collection: _Optional[str] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ...) -> None: ... -class FaultInjectResponse(_message.Message): - __slots__ = ["rv", "stat"] - RV_FIELD_NUMBER: _ClassVar[int] +class CreateResponse(_message.Message): + __slots__ = ("stat",) STAT_FIELD_NUMBER: _ClassVar[int] - rv: bytes stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., rv: _Optional[bytes] = ...) -> None: ... + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... -class FlushParams(_message.Message): - __slots__ = ["uuid"] - UUID_FIELD_NUMBER: _ClassVar[int] - uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... +class MetadataUsageParams(_message.Message): + __slots__ = ("prefix", "role") + PREFIX_FIELD_NUMBER: _ClassVar[int] + ROLE_FIELD_NUMBER: _ClassVar[int] + prefix: str + role: Role + def __init__(self, prefix: _Optional[str] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... 
-class FlushResponse(_message.Message): - __slots__ = ["stat", "versionMajor", "versionMinor"] +class MetadataUsageResponse(_message.Message): + __slots__ = ("stat", "tags", "annotations") STAT_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] stat: Status - versionMajor: int - versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... - -class GenerateCSVParams(_message.Message): - __slots__ = ["depth", "endTime", "includeVersions", "queryType", "startTime", "streams", "windowSize"] - class QueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - ALIGNED_WINDOWS_QUERY: GenerateCSVParams.QueryType - DEPTH_FIELD_NUMBER: _ClassVar[int] - ENDTIME_FIELD_NUMBER: _ClassVar[int] - INCLUDEVERSIONS_FIELD_NUMBER: _ClassVar[int] - QUERYTYPE_FIELD_NUMBER: _ClassVar[int] - RAW_QUERY: GenerateCSVParams.QueryType - STARTTIME_FIELD_NUMBER: _ClassVar[int] - STREAMS_FIELD_NUMBER: _ClassVar[int] - WINDOWSIZE_FIELD_NUMBER: _ClassVar[int] - WINDOWS_QUERY: GenerateCSVParams.QueryType - depth: int - endTime: int - includeVersions: bool - queryType: GenerateCSVParams.QueryType - startTime: int - streams: _containers.RepeatedCompositeFieldContainer[StreamCSVConfig] - windowSize: int - def __init__(self, queryType: _Optional[_Union[GenerateCSVParams.QueryType, str]] = ..., startTime: _Optional[int] = ..., endTime: _Optional[int] = ..., windowSize: _Optional[int] = ..., depth: _Optional[int] = ..., includeVersions: bool = ..., streams: _Optional[_Iterable[_Union[StreamCSVConfig, _Mapping]]] = ...) -> None: ... 
- -class GenerateCSVResponse(_message.Message): - __slots__ = ["isHeader", "row", "stat"] - ISHEADER_FIELD_NUMBER: _ClassVar[int] - ROW_FIELD_NUMBER: _ClassVar[int] - STAT_FIELD_NUMBER: _ClassVar[int] - isHeader: bool - row: _containers.RepeatedScalarFieldContainer[str] - stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., isHeader: bool = ..., row: _Optional[_Iterable[str]] = ...) -> None: ... - -class GetCompactionConfigParams(_message.Message): - __slots__ = ["uuid"] - UUID_FIELD_NUMBER: _ClassVar[int] - uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... - -class GetCompactionConfigResponse(_message.Message): - __slots__ = ["CompactedVersion", "LatestMajorVersion", "reducedResolutionRanges", "stat", "unused0"] - COMPACTEDVERSION_FIELD_NUMBER: _ClassVar[int] - CompactedVersion: int - LATESTMAJORVERSION_FIELD_NUMBER: _ClassVar[int] - LatestMajorVersion: int - REDUCEDRESOLUTIONRANGES_FIELD_NUMBER: _ClassVar[int] - STAT_FIELD_NUMBER: _ClassVar[int] - UNUSED0_FIELD_NUMBER: _ClassVar[int] - reducedResolutionRanges: _containers.RepeatedCompositeFieldContainer[ReducedResolutionRange] - stat: Status - unused0: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., LatestMajorVersion: _Optional[int] = ..., CompactedVersion: _Optional[int] = ..., reducedResolutionRanges: _Optional[_Iterable[_Union[ReducedResolutionRange, _Mapping]]] = ..., unused0: _Optional[int] = ...) -> None: ... - -class InfoParams(_message.Message): - __slots__ = [] - def __init__(self) -> None: ... 
- -class InfoResponse(_message.Message): - __slots__ = ["build", "majorVersion", "mash", "minorVersion", "proxy", "stat"] - BUILD_FIELD_NUMBER: _ClassVar[int] - MAJORVERSION_FIELD_NUMBER: _ClassVar[int] - MASH_FIELD_NUMBER: _ClassVar[int] - MINORVERSION_FIELD_NUMBER: _ClassVar[int] - PROXY_FIELD_NUMBER: _ClassVar[int] - STAT_FIELD_NUMBER: _ClassVar[int] - build: str - majorVersion: int - mash: Mash - minorVersion: int - proxy: ProxyInfo - stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., mash: _Optional[_Union[Mash, _Mapping]] = ..., majorVersion: _Optional[int] = ..., minorVersion: _Optional[int] = ..., build: _Optional[str] = ..., proxy: _Optional[_Union[ProxyInfo, _Mapping]] = ...) -> None: ... - -class InsertParams(_message.Message): - __slots__ = ["merge_policy", "rounding", "sync", "uuid", "values"] - MERGE_POLICY_FIELD_NUMBER: _ClassVar[int] - ROUNDING_FIELD_NUMBER: _ClassVar[int] - SYNC_FIELD_NUMBER: _ClassVar[int] - UUID_FIELD_NUMBER: _ClassVar[int] - VALUES_FIELD_NUMBER: _ClassVar[int] - merge_policy: MergePolicy - rounding: RoundSpec - sync: bool - uuid: bytes - values: _containers.RepeatedCompositeFieldContainer[RawPoint] - def __init__(self, uuid: _Optional[bytes] = ..., sync: bool = ..., merge_policy: _Optional[_Union[MergePolicy, str]] = ..., rounding: _Optional[_Union[RoundSpec, _Mapping]] = ..., values: _Optional[_Iterable[_Union[RawPoint, _Mapping]]] = ...) -> None: ... - -class InsertResponse(_message.Message): - __slots__ = ["stat", "versionMajor", "versionMinor"] - STAT_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - stat: Status - versionMajor: int - versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... 
+ tags: _containers.RepeatedCompositeFieldContainer[KeyCount] + annotations: _containers.RepeatedCompositeFieldContainer[KeyCount] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., tags: _Optional[_Iterable[_Union[KeyCount, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyCount, _Mapping]]] = ...) -> None: ... class KeyCount(_message.Message): - __slots__ = ["count", "key"] - COUNT_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("key", "count") KEY_FIELD_NUMBER: _ClassVar[int] - count: int + COUNT_FIELD_NUMBER: _ClassVar[int] key: str + count: int def __init__(self, key: _Optional[str] = ..., count: _Optional[int] = ...) -> None: ... -class KeyOptValue(_message.Message): - __slots__ = ["key", "val"] - KEY_FIELD_NUMBER: _ClassVar[int] - VAL_FIELD_NUMBER: _ClassVar[int] - key: str - val: OptValue - def __init__(self, key: _Optional[str] = ..., val: _Optional[_Union[OptValue, _Mapping]] = ...) -> None: ... - -class KeyValue(_message.Message): - __slots__ = ["key", "value"] - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... - class ListCollectionsParams(_message.Message): - __slots__ = ["prefix", "role"] + __slots__ = ("prefix", "role") PREFIX_FIELD_NUMBER: _ClassVar[int] ROLE_FIELD_NUMBER: _ClassVar[int] prefix: str @@ -340,381 +313,456 @@ class ListCollectionsParams(_message.Message): def __init__(self, prefix: _Optional[str] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... 
class ListCollectionsResponse(_message.Message): - __slots__ = ["collections", "stat"] - COLLECTIONS_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("stat", "collections") STAT_FIELD_NUMBER: _ClassVar[int] - collections: _containers.RepeatedScalarFieldContainer[str] + COLLECTIONS_FIELD_NUMBER: _ClassVar[int] stat: Status + collections: _containers.RepeatedScalarFieldContainer[str] def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., collections: _Optional[_Iterable[str]] = ...) -> None: ... class LookupStreamsParams(_message.Message): - __slots__ = ["annotations", "collection", "isCollectionPrefix", "role", "tags"] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("collection", "isCollectionPrefix", "tags", "annotations", "role") COLLECTION_FIELD_NUMBER: _ClassVar[int] ISCOLLECTIONPREFIX_FIELD_NUMBER: _ClassVar[int] - ROLE_FIELD_NUMBER: _ClassVar[int] TAGS_FIELD_NUMBER: _ClassVar[int] - annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + ROLE_FIELD_NUMBER: _ClassVar[int] collection: str isCollectionPrefix: bool - role: Role tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] + role: Role def __init__(self, collection: _Optional[str] = ..., isCollectionPrefix: bool = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... 
class LookupStreamsResponse(_message.Message): - __slots__ = ["results", "stat"] - RESULTS_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("stat", "results") STAT_FIELD_NUMBER: _ClassVar[int] - results: _containers.RepeatedCompositeFieldContainer[StreamDescriptor] + RESULTS_FIELD_NUMBER: _ClassVar[int] stat: Status + results: _containers.RepeatedCompositeFieldContainer[StreamDescriptor] def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., results: _Optional[_Iterable[_Union[StreamDescriptor, _Mapping]]] = ...) -> None: ... -class Mash(_message.Message): - __slots__ = ["healthy", "leader", "leaderRevision", "members", "revision", "totalWeight", "unmapped"] - HEALTHY_FIELD_NUMBER: _ClassVar[int] - LEADERREVISION_FIELD_NUMBER: _ClassVar[int] - LEADER_FIELD_NUMBER: _ClassVar[int] - MEMBERS_FIELD_NUMBER: _ClassVar[int] - REVISION_FIELD_NUMBER: _ClassVar[int] - TOTALWEIGHT_FIELD_NUMBER: _ClassVar[int] - UNMAPPED_FIELD_NUMBER: _ClassVar[int] - healthy: bool - leader: str - leaderRevision: int - members: _containers.RepeatedCompositeFieldContainer[Member] - revision: int - totalWeight: int - unmapped: float - def __init__(self, revision: _Optional[int] = ..., leader: _Optional[str] = ..., leaderRevision: _Optional[int] = ..., totalWeight: _Optional[int] = ..., healthy: bool = ..., unmapped: _Optional[float] = ..., members: _Optional[_Iterable[_Union[Member, _Mapping]]] = ...) -> None: ... 
- -class Member(_message.Message): - __slots__ = ["enabled", "end", "grpcEndpoints", "hash", "httpEndpoints", "nodename", "readPreference", "start", "up", "weight"] - ENABLED_FIELD_NUMBER: _ClassVar[int] - END_FIELD_NUMBER: _ClassVar[int] - GRPCENDPOINTS_FIELD_NUMBER: _ClassVar[int] - HASH_FIELD_NUMBER: _ClassVar[int] - HTTPENDPOINTS_FIELD_NUMBER: _ClassVar[int] - IN_FIELD_NUMBER: _ClassVar[int] - NODENAME_FIELD_NUMBER: _ClassVar[int] - READPREFERENCE_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] - UP_FIELD_NUMBER: _ClassVar[int] - WEIGHT_FIELD_NUMBER: _ClassVar[int] - enabled: bool - end: int - grpcEndpoints: str - hash: int - httpEndpoints: str - nodename: str - readPreference: float - start: int - up: bool - weight: int - def __init__(self, hash: _Optional[int] = ..., nodename: _Optional[str] = ..., up: bool = ..., enabled: bool = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., weight: _Optional[int] = ..., readPreference: _Optional[float] = ..., httpEndpoints: _Optional[str] = ..., grpcEndpoints: _Optional[str] = ..., **kwargs) -> None: ... - -class MetadataUsageParams(_message.Message): - __slots__ = ["prefix", "role"] - PREFIX_FIELD_NUMBER: _ClassVar[int] - ROLE_FIELD_NUMBER: _ClassVar[int] - prefix: str - role: Role - def __init__(self, prefix: _Optional[str] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... - -class MetadataUsageResponse(_message.Message): - __slots__ = ["annotations", "stat", "tags"] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - STAT_FIELD_NUMBER: _ClassVar[int] - TAGS_FIELD_NUMBER: _ClassVar[int] - annotations: _containers.RepeatedCompositeFieldContainer[KeyCount] - stat: Status - tags: _containers.RepeatedCompositeFieldContainer[KeyCount] - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., tags: _Optional[_Iterable[_Union[KeyCount, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyCount, _Mapping]]] = ...) -> None: ... 
- class NearestParams(_message.Message): - __slots__ = ["backward", "time", "uuid", "versionMajor"] - BACKWARD_FIELD_NUMBER: _ClassVar[int] - TIME_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("uuid", "time", "versionMajor", "backward") UUID_FIELD_NUMBER: _ClassVar[int] + TIME_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - backward: bool - time: int + BACKWARD_FIELD_NUMBER: _ClassVar[int] uuid: bytes + time: int versionMajor: int + backward: bool def __init__(self, uuid: _Optional[bytes] = ..., time: _Optional[int] = ..., versionMajor: _Optional[int] = ..., backward: bool = ...) -> None: ... class NearestResponse(_message.Message): - __slots__ = ["stat", "value", "versionMajor", "versionMinor"] + __slots__ = ("stat", "versionMajor", "versionMinor", "value") STAT_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] stat: Status - value: RawPoint versionMajor: int versionMinor: int + value: RawPoint def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., value: _Optional[_Union[RawPoint, _Mapping]] = ...) -> None: ... -class ObliterateParams(_message.Message): - __slots__ = ["uuid"] - UUID_FIELD_NUMBER: _ClassVar[int] - uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... - -class ObliterateResponse(_message.Message): - __slots__ = ["stat"] - STAT_FIELD_NUMBER: _ClassVar[int] - stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... - -class OptValue(_message.Message): - __slots__ = ["value"] - VALUE_FIELD_NUMBER: _ClassVar[int] - value: str - def __init__(self, value: _Optional[str] = ...) -> None: ... 
- -class ProxyInfo(_message.Message): - __slots__ = ["proxyEndpoints"] - PROXYENDPOINTS_FIELD_NUMBER: _ClassVar[int] - proxyEndpoints: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, proxyEndpoints: _Optional[_Iterable[str]] = ...) -> None: ... - -class RawPoint(_message.Message): - __slots__ = ["time", "value"] - TIME_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - time: int - value: float - def __init__(self, time: _Optional[int] = ..., value: _Optional[float] = ...) -> None: ... - -class RawPointVec(_message.Message): - __slots__ = ["time", "value"] - TIME_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - time: int - value: _containers.RepeatedScalarFieldContainer[float] - def __init__(self, time: _Optional[int] = ..., value: _Optional[_Iterable[float]] = ...) -> None: ... - -class RawValuesParams(_message.Message): - __slots__ = ["end", "start", "uuid", "versionMajor"] - END_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] +class ChangesParams(_message.Message): + __slots__ = ("uuid", "fromMajor", "toMajor", "resolution") UUID_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - end: int - start: int + FROMMAJOR_FIELD_NUMBER: _ClassVar[int] + TOMAJOR_FIELD_NUMBER: _ClassVar[int] + RESOLUTION_FIELD_NUMBER: _ClassVar[int] uuid: bytes - versionMajor: int - def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ...) -> None: ... + fromMajor: int + toMajor: int + resolution: int + def __init__(self, uuid: _Optional[bytes] = ..., fromMajor: _Optional[int] = ..., toMajor: _Optional[int] = ..., resolution: _Optional[int] = ...) -> None: ... 
-class RawValuesResponse(_message.Message): - __slots__ = ["stat", "values", "versionMajor", "versionMinor"] +class ChangesResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor", "ranges") STAT_FIELD_NUMBER: _ClassVar[int] - VALUES_FIELD_NUMBER: _ClassVar[int] VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + RANGES_FIELD_NUMBER: _ClassVar[int] stat: Status - values: _containers.RepeatedCompositeFieldContainer[RawPoint] versionMajor: int versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[RawPoint, _Mapping]]] = ...) -> None: ... - -class ReducedResolutionRange(_message.Message): - __slots__ = ["End", "Resolution", "Start"] - END_FIELD_NUMBER: _ClassVar[int] - End: int - RESOLUTION_FIELD_NUMBER: _ClassVar[int] - Resolution: int - START_FIELD_NUMBER: _ClassVar[int] - Start: int - def __init__(self, Start: _Optional[int] = ..., End: _Optional[int] = ..., Resolution: _Optional[int] = ...) -> None: ... - -class Role(_message.Message): - __slots__ = ["name"] - NAME_FIELD_NUMBER: _ClassVar[int] - name: str - def __init__(self, name: _Optional[str] = ...) -> None: ... + ranges: _containers.RepeatedCompositeFieldContainer[ChangedRange] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., ranges: _Optional[_Iterable[_Union[ChangedRange, _Mapping]]] = ...) -> None: ... class RoundSpec(_message.Message): - __slots__ = ["bits"] + __slots__ = ("bits",) BITS_FIELD_NUMBER: _ClassVar[int] bits: int def __init__(self, bits: _Optional[int] = ...) -> None: ... 
-class SQLQueryParams(_message.Message): - __slots__ = ["params", "query", "role"] - PARAMS_FIELD_NUMBER: _ClassVar[int] - QUERY_FIELD_NUMBER: _ClassVar[int] - ROLE_FIELD_NUMBER: _ClassVar[int] - params: _containers.RepeatedScalarFieldContainer[str] - query: str - role: Role - def __init__(self, query: _Optional[str] = ..., params: _Optional[_Iterable[str]] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... +class InsertParams(_message.Message): + __slots__ = ("uuid", "sync", "merge_policy", "rounding", "values") + UUID_FIELD_NUMBER: _ClassVar[int] + SYNC_FIELD_NUMBER: _ClassVar[int] + MERGE_POLICY_FIELD_NUMBER: _ClassVar[int] + ROUNDING_FIELD_NUMBER: _ClassVar[int] + VALUES_FIELD_NUMBER: _ClassVar[int] + uuid: bytes + sync: bool + merge_policy: MergePolicy + rounding: RoundSpec + values: _containers.RepeatedCompositeFieldContainer[RawPoint] + def __init__(self, uuid: _Optional[bytes] = ..., sync: bool = ..., merge_policy: _Optional[_Union[MergePolicy, str]] = ..., rounding: _Optional[_Union[RoundSpec, _Mapping]] = ..., values: _Optional[_Iterable[_Union[RawPoint, _Mapping]]] = ...) -> None: ... -class SQLQueryResponse(_message.Message): - __slots__ = ["SQLQueryRow", "stat"] - SQLQUERYROW_FIELD_NUMBER: _ClassVar[int] - SQLQueryRow: _containers.RepeatedScalarFieldContainer[bytes] +class ArrowInsertParams(_message.Message): + __slots__ = ("uuid", "sync", "merge_policy", "rounding", "arrowBytes") + UUID_FIELD_NUMBER: _ClassVar[int] + SYNC_FIELD_NUMBER: _ClassVar[int] + MERGE_POLICY_FIELD_NUMBER: _ClassVar[int] + ROUNDING_FIELD_NUMBER: _ClassVar[int] + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] + uuid: bytes + sync: bool + merge_policy: MergePolicy + rounding: RoundSpec + arrowBytes: bytes + def __init__(self, uuid: _Optional[bytes] = ..., sync: bool = ..., merge_policy: _Optional[_Union[MergePolicy, str]] = ..., rounding: _Optional[_Union[RoundSpec, _Mapping]] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... 
+ +class InsertResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor") STAT_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., SQLQueryRow: _Optional[_Iterable[bytes]] = ...) -> None: ... + versionMajor: int + versionMinor: int + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... -class SetCompactionConfigParams(_message.Message): - __slots__ = ["CompactedVersion", "reducedResolutionRanges", "unused0", "uuid"] - COMPACTEDVERSION_FIELD_NUMBER: _ClassVar[int] - CompactedVersion: int - REDUCEDRESOLUTIONRANGES_FIELD_NUMBER: _ClassVar[int] - UNUSED0_FIELD_NUMBER: _ClassVar[int] +class DeleteParams(_message.Message): + __slots__ = ("uuid", "start", "end") UUID_FIELD_NUMBER: _ClassVar[int] - reducedResolutionRanges: _containers.RepeatedCompositeFieldContainer[ReducedResolutionRange] - unused0: int + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., CompactedVersion: _Optional[int] = ..., reducedResolutionRanges: _Optional[_Iterable[_Union[ReducedResolutionRange, _Mapping]]] = ..., unused0: _Optional[int] = ...) -> None: ... + start: int + end: int + def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ...) -> None: ... -class SetCompactionConfigResponse(_message.Message): - __slots__ = ["stat"] +class DeleteResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor") STAT_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... 
+ versionMajor: int + versionMinor: int + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... -class SetStreamAnnotationsParams(_message.Message): - __slots__ = ["changes", "expectedPropertyVersion", "removals", "uuid"] - CHANGES_FIELD_NUMBER: _ClassVar[int] - EXPECTEDPROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] - REMOVALS_FIELD_NUMBER: _ClassVar[int] +class InfoParams(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class InfoResponse(_message.Message): + __slots__ = ("stat", "mash", "majorVersion", "minorVersion", "build", "proxy") + STAT_FIELD_NUMBER: _ClassVar[int] + MASH_FIELD_NUMBER: _ClassVar[int] + MAJORVERSION_FIELD_NUMBER: _ClassVar[int] + MINORVERSION_FIELD_NUMBER: _ClassVar[int] + BUILD_FIELD_NUMBER: _ClassVar[int] + PROXY_FIELD_NUMBER: _ClassVar[int] + stat: Status + mash: Mash + majorVersion: int + minorVersion: int + build: str + proxy: ProxyInfo + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., mash: _Optional[_Union[Mash, _Mapping]] = ..., majorVersion: _Optional[int] = ..., minorVersion: _Optional[int] = ..., build: _Optional[str] = ..., proxy: _Optional[_Union[ProxyInfo, _Mapping]] = ...) -> None: ... + +class ProxyInfo(_message.Message): + __slots__ = ("proxyEndpoints",) + PROXYENDPOINTS_FIELD_NUMBER: _ClassVar[int] + proxyEndpoints: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, proxyEndpoints: _Optional[_Iterable[str]] = ...) -> None: ... + +class FaultInjectParams(_message.Message): + __slots__ = ("type", "params") + TYPE_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] + type: int + params: bytes + def __init__(self, type: _Optional[int] = ..., params: _Optional[bytes] = ...) -> None: ... 
+ +class FaultInjectResponse(_message.Message): + __slots__ = ("stat", "rv") + STAT_FIELD_NUMBER: _ClassVar[int] + RV_FIELD_NUMBER: _ClassVar[int] + stat: Status + rv: bytes + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., rv: _Optional[bytes] = ...) -> None: ... + +class FlushParams(_message.Message): + __slots__ = ("uuid",) UUID_FIELD_NUMBER: _ClassVar[int] - changes: _containers.RepeatedCompositeFieldContainer[KeyOptValue] - expectedPropertyVersion: int - removals: _containers.RepeatedScalarFieldContainer[str] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., expectedPropertyVersion: _Optional[int] = ..., changes: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., removals: _Optional[_Iterable[str]] = ...) -> None: ... + def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... -class SetStreamAnnotationsResponse(_message.Message): - __slots__ = ["stat"] +class FlushResponse(_message.Message): + __slots__ = ("stat", "versionMajor", "versionMinor") STAT_FIELD_NUMBER: _ClassVar[int] + VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] + VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] stat: Status - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... + versionMajor: int + versionMinor: int + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ...) -> None: ... 
-class SetStreamTagsParams(_message.Message): - __slots__ = ["collection", "expectedPropertyVersion", "remove", "tags", "uuid"] - COLLECTION_FIELD_NUMBER: _ClassVar[int] - EXPECTEDPROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] - REMOVE_FIELD_NUMBER: _ClassVar[int] - TAGS_FIELD_NUMBER: _ClassVar[int] +class ObliterateParams(_message.Message): + __slots__ = ("uuid",) UUID_FIELD_NUMBER: _ClassVar[int] - collection: str - expectedPropertyVersion: int - remove: _containers.RepeatedScalarFieldContainer[str] - tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., expectedPropertyVersion: _Optional[int] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., collection: _Optional[str] = ..., remove: _Optional[_Iterable[str]] = ...) -> None: ... + def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... -class SetStreamTagsResponse(_message.Message): - __slots__ = ["stat"] +class ObliterateResponse(_message.Message): + __slots__ = ("stat",) STAT_FIELD_NUMBER: _ClassVar[int] stat: Status def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... +class RawPoint(_message.Message): + __slots__ = ("time", "value") + TIME_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + time: int + value: float + def __init__(self, time: _Optional[int] = ..., value: _Optional[float] = ...) -> None: ... 
+ class StatPoint(_message.Message): - __slots__ = ["count", "max", "mean", "min", "stddev", "time"] - COUNT_FIELD_NUMBER: _ClassVar[int] - MAX_FIELD_NUMBER: _ClassVar[int] - MEAN_FIELD_NUMBER: _ClassVar[int] + __slots__ = ("time", "min", "mean", "max", "count", "stddev") + TIME_FIELD_NUMBER: _ClassVar[int] MIN_FIELD_NUMBER: _ClassVar[int] + MEAN_FIELD_NUMBER: _ClassVar[int] + MAX_FIELD_NUMBER: _ClassVar[int] + COUNT_FIELD_NUMBER: _ClassVar[int] STDDEV_FIELD_NUMBER: _ClassVar[int] - TIME_FIELD_NUMBER: _ClassVar[int] - count: int - max: float - mean: float + time: int min: float + mean: float + max: float + count: int stddev: float - time: int def __init__(self, time: _Optional[int] = ..., min: _Optional[float] = ..., mean: _Optional[float] = ..., max: _Optional[float] = ..., count: _Optional[int] = ..., stddev: _Optional[float] = ...) -> None: ... +class ChangedRange(_message.Message): + __slots__ = ("start", "end") + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + start: int + end: int + def __init__(self, start: _Optional[int] = ..., end: _Optional[int] = ...) -> None: ... + class Status(_message.Message): - __slots__ = ["code", "mash", "msg"] + __slots__ = ("code", "msg", "mash") CODE_FIELD_NUMBER: _ClassVar[int] - MASH_FIELD_NUMBER: _ClassVar[int] MSG_FIELD_NUMBER: _ClassVar[int] + MASH_FIELD_NUMBER: _ClassVar[int] code: int - mash: Mash msg: str + mash: Mash def __init__(self, code: _Optional[int] = ..., msg: _Optional[str] = ..., mash: _Optional[_Union[Mash, _Mapping]] = ...) -> None: ... 
+class Mash(_message.Message): + __slots__ = ("revision", "leader", "leaderRevision", "totalWeight", "healthy", "unmapped", "members") + REVISION_FIELD_NUMBER: _ClassVar[int] + LEADER_FIELD_NUMBER: _ClassVar[int] + LEADERREVISION_FIELD_NUMBER: _ClassVar[int] + TOTALWEIGHT_FIELD_NUMBER: _ClassVar[int] + HEALTHY_FIELD_NUMBER: _ClassVar[int] + UNMAPPED_FIELD_NUMBER: _ClassVar[int] + MEMBERS_FIELD_NUMBER: _ClassVar[int] + revision: int + leader: str + leaderRevision: int + totalWeight: int + healthy: bool + unmapped: float + members: _containers.RepeatedCompositeFieldContainer[Member] + def __init__(self, revision: _Optional[int] = ..., leader: _Optional[str] = ..., leaderRevision: _Optional[int] = ..., totalWeight: _Optional[int] = ..., healthy: bool = ..., unmapped: _Optional[float] = ..., members: _Optional[_Iterable[_Union[Member, _Mapping]]] = ...) -> None: ... + +class Member(_message.Message): + __slots__ = ("hash", "nodename", "up", "enabled", "start", "end", "weight", "readPreference", "httpEndpoints", "grpcEndpoints") + HASH_FIELD_NUMBER: _ClassVar[int] + NODENAME_FIELD_NUMBER: _ClassVar[int] + UP_FIELD_NUMBER: _ClassVar[int] + IN_FIELD_NUMBER: _ClassVar[int] + ENABLED_FIELD_NUMBER: _ClassVar[int] + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + WEIGHT_FIELD_NUMBER: _ClassVar[int] + READPREFERENCE_FIELD_NUMBER: _ClassVar[int] + HTTPENDPOINTS_FIELD_NUMBER: _ClassVar[int] + GRPCENDPOINTS_FIELD_NUMBER: _ClassVar[int] + hash: int + nodename: str + up: bool + enabled: bool + start: int + end: int + weight: int + readPreference: float + httpEndpoints: str + grpcEndpoints: str + def __init__(self, hash: _Optional[int] = ..., nodename: _Optional[str] = ..., up: bool = ..., enabled: bool = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., weight: _Optional[int] = ..., readPreference: _Optional[float] = ..., httpEndpoints: _Optional[str] = ..., grpcEndpoints: _Optional[str] = ..., **kwargs) -> None: ... 
+ +class KeyOptValue(_message.Message): + __slots__ = ("key", "val") + KEY_FIELD_NUMBER: _ClassVar[int] + VAL_FIELD_NUMBER: _ClassVar[int] + key: str + val: OptValue + def __init__(self, key: _Optional[str] = ..., val: _Optional[_Union[OptValue, _Mapping]] = ...) -> None: ... + +class OptValue(_message.Message): + __slots__ = ("value",) + VALUE_FIELD_NUMBER: _ClassVar[int] + value: str + def __init__(self, value: _Optional[str] = ...) -> None: ... + +class KeyValue(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + class StreamCSVConfig(_message.Message): - __slots__ = ["label", "uuid", "version"] + __slots__ = ("version", "label", "uuid") + VERSION_FIELD_NUMBER: _ClassVar[int] LABEL_FIELD_NUMBER: _ClassVar[int] UUID_FIELD_NUMBER: _ClassVar[int] - VERSION_FIELD_NUMBER: _ClassVar[int] + version: int label: str uuid: bytes - version: int def __init__(self, version: _Optional[int] = ..., label: _Optional[str] = ..., uuid: _Optional[bytes] = ...) -> None: ... -class StreamDescriptor(_message.Message): - __slots__ = ["annotations", "collection", "propertyVersion", "tags", "uuid"] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - COLLECTION_FIELD_NUMBER: _ClassVar[int] - PROPERTYVERSION_FIELD_NUMBER: _ClassVar[int] - TAGS_FIELD_NUMBER: _ClassVar[int] - UUID_FIELD_NUMBER: _ClassVar[int] - annotations: _containers.RepeatedCompositeFieldContainer[KeyOptValue] - collection: str - propertyVersion: int - tags: _containers.RepeatedCompositeFieldContainer[KeyOptValue] - uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., collection: _Optional[str] = ..., tags: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., annotations: _Optional[_Iterable[_Union[KeyOptValue, _Mapping]]] = ..., propertyVersion: _Optional[int] = ...) -> None: ... 
+class GenerateCSVParams(_message.Message): + __slots__ = ("queryType", "startTime", "endTime", "windowSize", "depth", "includeVersions", "streams") + class QueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + ALIGNED_WINDOWS_QUERY: _ClassVar[GenerateCSVParams.QueryType] + WINDOWS_QUERY: _ClassVar[GenerateCSVParams.QueryType] + RAW_QUERY: _ClassVar[GenerateCSVParams.QueryType] + ALIGNED_WINDOWS_QUERY: GenerateCSVParams.QueryType + WINDOWS_QUERY: GenerateCSVParams.QueryType + RAW_QUERY: GenerateCSVParams.QueryType + QUERYTYPE_FIELD_NUMBER: _ClassVar[int] + STARTTIME_FIELD_NUMBER: _ClassVar[int] + ENDTIME_FIELD_NUMBER: _ClassVar[int] + WINDOWSIZE_FIELD_NUMBER: _ClassVar[int] + DEPTH_FIELD_NUMBER: _ClassVar[int] + INCLUDEVERSIONS_FIELD_NUMBER: _ClassVar[int] + STREAMS_FIELD_NUMBER: _ClassVar[int] + queryType: GenerateCSVParams.QueryType + startTime: int + endTime: int + windowSize: int + depth: int + includeVersions: bool + streams: _containers.RepeatedCompositeFieldContainer[StreamCSVConfig] + def __init__(self, queryType: _Optional[_Union[GenerateCSVParams.QueryType, str]] = ..., startTime: _Optional[int] = ..., endTime: _Optional[int] = ..., windowSize: _Optional[int] = ..., depth: _Optional[int] = ..., includeVersions: bool = ..., streams: _Optional[_Iterable[_Union[StreamCSVConfig, _Mapping]]] = ...) -> None: ... -class StreamInfoParams(_message.Message): - __slots__ = ["omitDescriptor", "omitVersion", "role", "uuid"] - OMITDESCRIPTOR_FIELD_NUMBER: _ClassVar[int] - OMITVERSION_FIELD_NUMBER: _ClassVar[int] +class GenerateCSVResponse(_message.Message): + __slots__ = ("stat", "isHeader", "row") + STAT_FIELD_NUMBER: _ClassVar[int] + ISHEADER_FIELD_NUMBER: _ClassVar[int] + ROW_FIELD_NUMBER: _ClassVar[int] + stat: Status + isHeader: bool + row: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., isHeader: bool = ..., row: _Optional[_Iterable[str]] = ...) -> None: ... 
+ +class SQLQueryParams(_message.Message): + __slots__ = ("query", "params", "role") + QUERY_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] ROLE_FIELD_NUMBER: _ClassVar[int] - UUID_FIELD_NUMBER: _ClassVar[int] - omitDescriptor: bool - omitVersion: bool + query: str + params: _containers.RepeatedScalarFieldContainer[str] role: Role + def __init__(self, query: _Optional[str] = ..., params: _Optional[_Iterable[str]] = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... + +class SQLQueryResponse(_message.Message): + __slots__ = ("stat", "SQLQueryRow") + STAT_FIELD_NUMBER: _ClassVar[int] + SQLQUERYROW_FIELD_NUMBER: _ClassVar[int] + stat: Status + SQLQueryRow: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., SQLQueryRow: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class Role(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class SetCompactionConfigParams(_message.Message): + __slots__ = ("uuid", "CompactedVersion", "reducedResolutionRanges", "unused0") + UUID_FIELD_NUMBER: _ClassVar[int] + COMPACTEDVERSION_FIELD_NUMBER: _ClassVar[int] + REDUCEDRESOLUTIONRANGES_FIELD_NUMBER: _ClassVar[int] + UNUSED0_FIELD_NUMBER: _ClassVar[int] uuid: bytes - def __init__(self, uuid: _Optional[bytes] = ..., omitVersion: bool = ..., omitDescriptor: bool = ..., role: _Optional[_Union[Role, _Mapping]] = ...) -> None: ... + CompactedVersion: int + reducedResolutionRanges: _containers.RepeatedCompositeFieldContainer[ReducedResolutionRange] + unused0: int + def __init__(self, uuid: _Optional[bytes] = ..., CompactedVersion: _Optional[int] = ..., reducedResolutionRanges: _Optional[_Iterable[_Union[ReducedResolutionRange, _Mapping]]] = ..., unused0: _Optional[int] = ...) -> None: ... 
-class StreamInfoResponse(_message.Message): - __slots__ = ["descriptor", "stat", "versionMajor", "versionMinor"] - DESCRIPTOR_FIELD_NUMBER: _ClassVar[int] +class SetCompactionConfigResponse(_message.Message): + __slots__ = ("stat",) STAT_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] - descriptor: StreamDescriptor stat: Status - versionMajor: int - versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., descriptor: _Optional[_Union[StreamDescriptor, _Mapping]] = ...) -> None: ... + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ...) -> None: ... -class WindowsParams(_message.Message): - __slots__ = ["depth", "end", "start", "uuid", "versionMajor", "width"] - DEPTH_FIELD_NUMBER: _ClassVar[int] - END_FIELD_NUMBER: _ClassVar[int] - START_FIELD_NUMBER: _ClassVar[int] +class GetCompactionConfigParams(_message.Message): + __slots__ = ("uuid",) UUID_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - WIDTH_FIELD_NUMBER: _ClassVar[int] - depth: int - end: int - start: int uuid: bytes - versionMajor: int - width: int - def __init__(self, uuid: _Optional[bytes] = ..., start: _Optional[int] = ..., end: _Optional[int] = ..., versionMajor: _Optional[int] = ..., width: _Optional[int] = ..., depth: _Optional[int] = ...) -> None: ... + def __init__(self, uuid: _Optional[bytes] = ...) -> None: ... 
-class WindowsResponse(_message.Message): - __slots__ = ["stat", "values", "versionMajor", "versionMinor"] +class GetCompactionConfigResponse(_message.Message): + __slots__ = ("stat", "LatestMajorVersion", "CompactedVersion", "reducedResolutionRanges", "unused0") STAT_FIELD_NUMBER: _ClassVar[int] - VALUES_FIELD_NUMBER: _ClassVar[int] - VERSIONMAJOR_FIELD_NUMBER: _ClassVar[int] - VERSIONMINOR_FIELD_NUMBER: _ClassVar[int] + LATESTMAJORVERSION_FIELD_NUMBER: _ClassVar[int] + COMPACTEDVERSION_FIELD_NUMBER: _ClassVar[int] + REDUCEDRESOLUTIONRANGES_FIELD_NUMBER: _ClassVar[int] + UNUSED0_FIELD_NUMBER: _ClassVar[int] stat: Status - values: _containers.RepeatedCompositeFieldContainer[StatPoint] - versionMajor: int - versionMinor: int - def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., versionMajor: _Optional[int] = ..., versionMinor: _Optional[int] = ..., values: _Optional[_Iterable[_Union[StatPoint, _Mapping]]] = ...) -> None: ... + LatestMajorVersion: int + CompactedVersion: int + reducedResolutionRanges: _containers.RepeatedCompositeFieldContainer[ReducedResolutionRange] + unused0: int + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., LatestMajorVersion: _Optional[int] = ..., CompactedVersion: _Optional[int] = ..., reducedResolutionRanges: _Optional[_Iterable[_Union[ReducedResolutionRange, _Mapping]]] = ..., unused0: _Optional[int] = ...) -> None: ... -class MergePolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] +class ReducedResolutionRange(_message.Message): + __slots__ = ("Start", "End", "Resolution") + START_FIELD_NUMBER: _ClassVar[int] + END_FIELD_NUMBER: _ClassVar[int] + RESOLUTION_FIELD_NUMBER: _ClassVar[int] + Start: int + End: int + Resolution: int + def __init__(self, Start: _Optional[int] = ..., End: _Optional[int] = ..., Resolution: _Optional[int] = ...) -> None: ... 
+ +class SubscriptionUpdate(_message.Message): + __slots__ = ("op", "uuid") + OP_FIELD_NUMBER: _ClassVar[int] + UUID_FIELD_NUMBER: _ClassVar[int] + op: SubscriptionUpdateOp + uuid: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, op: _Optional[_Union[SubscriptionUpdateOp, str]] = ..., uuid: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class SubscriptionResp(_message.Message): + __slots__ = ("stat", "uuid", "arrowBytes") + STAT_FIELD_NUMBER: _ClassVar[int] + UUID_FIELD_NUMBER: _ClassVar[int] + ARROWBYTES_FIELD_NUMBER: _ClassVar[int] + stat: Status + uuid: bytes + arrowBytes: bytes + def __init__(self, stat: _Optional[_Union[Status, _Mapping]] = ..., uuid: _Optional[bytes] = ..., arrowBytes: _Optional[bytes] = ...) -> None: ... diff --git a/btrdb/grpcinterface/btrdb_pb2_grpc.py b/btrdb/grpcinterface/btrdb_pb2_grpc.py index 52a72bd..d0db693 100644 --- a/btrdb/grpcinterface/btrdb_pb2_grpc.py +++ b/btrdb/grpcinterface/btrdb_pb2_grpc.py @@ -21,7 +21,7 @@ def __init__(self, channel): ) self.ArrowRawValues = channel.unary_stream( '/v5api.BTrDB/ArrowRawValues', - request_serializer=btrdb__pb2.RawValuesParams.SerializeToString, + request_serializer=btrdb__pb2.ArrowRawValuesParams.SerializeToString, response_deserializer=btrdb__pb2.ArrowRawValuesResponse.FromString, ) self.ArrowMultiValues = channel.unary_stream( @@ -139,6 +139,11 @@ def __init__(self, channel): request_serializer=btrdb__pb2.SQLQueryParams.SerializeToString, response_deserializer=btrdb__pb2.SQLQueryResponse.FromString, ) + self.Subscribe = channel.stream_stream( + '/v5api.BTrDB/Subscribe', + request_serializer=btrdb__pb2.SubscriptionUpdate.SerializeToString, + response_deserializer=btrdb__pb2.SubscriptionResp.FromString, + ) class BTrDBServicer(object): @@ -289,6 +294,12 @@ def GenerateCSV(self, request, context): raise NotImplementedError('Method not implemented!') def SQLQuery(self, request, context): + """Missing associated documentation comment in .proto file.""" + 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Subscribe(self, request_iterator, context): """rpc SetCompactionConfig(SetCompactionConfigParams) returns (SetCompactionConfigResponse); rpc GetCompactionConfig(GetCompactionConfigParams) returns (GetCompactionConfigResponse); """ @@ -306,7 +317,7 @@ def add_BTrDBServicer_to_server(servicer, server): ), 'ArrowRawValues': grpc.unary_stream_rpc_method_handler( servicer.ArrowRawValues, - request_deserializer=btrdb__pb2.RawValuesParams.FromString, + request_deserializer=btrdb__pb2.ArrowRawValuesParams.FromString, response_serializer=btrdb__pb2.ArrowRawValuesResponse.SerializeToString, ), 'ArrowMultiValues': grpc.unary_stream_rpc_method_handler( @@ -424,6 +435,11 @@ def add_BTrDBServicer_to_server(servicer, server): request_deserializer=btrdb__pb2.SQLQueryParams.FromString, response_serializer=btrdb__pb2.SQLQueryResponse.SerializeToString, ), + 'Subscribe': grpc.stream_stream_rpc_method_handler( + servicer.Subscribe, + request_deserializer=btrdb__pb2.SubscriptionUpdate.FromString, + response_serializer=btrdb__pb2.SubscriptionResp.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'v5api.BTrDB', rpc_method_handlers) @@ -463,7 +479,7 @@ def ArrowRawValues(request, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/v5api.BTrDB/ArrowRawValues', - btrdb__pb2.RawValuesParams.SerializeToString, + btrdb__pb2.ArrowRawValuesParams.SerializeToString, btrdb__pb2.ArrowRawValuesResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -858,3 +874,20 @@ def SQLQuery(request, btrdb__pb2.SQLQueryResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Subscribe(request_iterator, + target, + 
options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/v5api.BTrDB/Subscribe', + btrdb__pb2.SubscriptionUpdate.SerializeToString, + btrdb__pb2.SubscriptionResp.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/btrdb/stream.py b/btrdb/stream.py index 7343def..e9d4ac5 100644 --- a/btrdb/stream.py +++ b/btrdb/stream.py @@ -17,14 +17,24 @@ import json import logging import re +import uuid import uuid as uuidlib import warnings from collections import deque from collections.abc import Sequence from copy import deepcopy -from typing import List +from typing import TYPE_CHECKING, Dict, List -import pyarrow as pa +import pyarrow +import pyarrow.compute as pc + +try: + import pyarrow as pa +except ImportError: + pa = None + +if TYPE_CHECKING: + import pyarrow as pa from btrdb.exceptions import ( BTrDBError, @@ -52,6 +62,9 @@ MINIMUM_TIME = -(16 << 56) MAXIMUM_TIME = (48 << 56) - 1 + +_ARROW_IMPORT_MSG = """Package pyarrow required, please pip install.""" + try: RE_PATTERN = re._pattern_type except Exception: @@ -140,6 +153,18 @@ def exists(self): ------- bool Indicates whether stream is extant in the BTrDB server. + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> stream.uuid + UUID('...') + >>> stream.exists() + True + + """ if self._known_to_exist: @@ -165,15 +190,7 @@ def count( Compute the total number of points in the stream Counts the number of points in the specified window and version. By - default returns the latest total count of points in the stream. This - helper method sums the counts of all StatPoints returned by - ``aligned_windows``. Because of this, note that the start and end - timestamps may be adjusted if they are not powers of 2. 
For smaller - windows of time, you may also need to adjust the pointwidth to ensure - that the count granularity is captured appropriately. - - Alternatively you can set the precise argument to True which will - give an exact count to the nanosecond but may be slower to execute. + default, returns the latest total count of points in the stream. Parameters ---------- @@ -202,6 +219,32 @@ def count( ------- int The total number of points in the stream for the specified window. + + + .. note:: + + This helper method sums the counts of all StatPoints returned by + ``aligned_windows``. Because of this, note that the start and end + timestamps may be adjusted if they are not powers of 2. For smaller + windows of time, you may also need to adjust the ``pointwidth`` to ensure + that the count granularity is captured appropriately. + + Alternatively you can set the ``precise`` argument to ``True`` which will + give an exact count to the nanosecond but may be slower to execute. + + + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> stream.count() + 1234 + >>> stream.count(start=1500000000000000000, end=1603680000000000000, pointwidth=55) + 567 + >>> stream.count(start=1500000000000000000, end=1603680000000000000, precise=True) + 789 """ if not precise: @@ -220,16 +263,26 @@ def count( @property def btrdb(self): """ - Returns the stream's BTrDB object. + Returns the stream's BTrDB object. - Parameters - ---------- - None + Parameters + ---------- + None - Returns - ------- - BTrDB - The BTrDB database object. + Returns + ------- + BTrDB + The BTrDB database object. + + Examples + -------- + + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> btrdb_obj = stream.btrdb + >>> btrdb_obj + """ return self._btrdb @@ -265,6 +318,14 @@ def name(self): str The name of the stream. 
+ Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> stream.name + 'foo' + """ return self.tags()["name"] @@ -280,6 +341,14 @@ def unit(self): str The unit for values of the stream. + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> stream.unit + 'volts' + """ return self.tags()["unit"] @@ -298,6 +367,14 @@ def collection(self): str the collection of the stream + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> stream.collection + 'foo/bar' + """ if self._collection is not None: return self._collection @@ -341,6 +418,20 @@ def earliest( The first data point in the stream and the version of the stream the value was retrieved at (tuple(RawPoint, int)). + + Examples + -------- + Get the earliest point for a stream using ``version`` ``0``. + + >>> stream.earliest(version=0) + (, 1234567) + + Extract just the ``RawPoint`` data. + + >>> pt, _ = stream.earliest(version=0) + >>> print(pt.time, pt.value) + 1547241923338098176 123.7 + """ start = MINIMUM_TIME return self.nearest(start, version=version, backward=False) @@ -381,6 +472,18 @@ def latest( The last data point in the stream and the version of the stream the value was retrieved at (tuple(RawPoint, int)). + Examples + -------- + Get the latest point for a stream using ``version`` ``0``. + + >>> stream.latest(version=0) + (, 1234567) + + Extract just the ``RawPoint`` data. + + >>> pt, _ = stream.latest(version=0) + >>> print(pt.time, pt.value) + 1547241923338098176 123.7 """ start = MAXIMUM_TIME return self.nearest(start, version=version, backward=True) @@ -397,7 +500,7 @@ def current( """ Returns the point that is closest to the current timestamp, e.g. the latest point in the stream up until now. Note that no future values will be returned. - Returns None if errors during lookup or there are no values before now. 
+ Returns None if errors occur during lookup or there are no values before now. Parameters ---------- @@ -508,6 +611,25 @@ def annotations( A tuple containing a dictionary of annotations and an integer representing the version of the metadata (tuple(dict, int)). + + .. note:: + + This ``version`` is not the same as the ``stream.version``. + + Examples + -------- + Accessing a streams annotations. + + >>> stream.annotations() + ({"foo":"bar", "baz":"bazaar"}, 231) + + Extract the version and metadata separately. + + >>> annotations, metadata_version = stream.annotations() + >>> annotations + {"foo":"bar", "baz":"bazaar"} + >>> metadata_version + 231 """ if refresh or self._annotations is None: self.refresh_metadata() @@ -525,9 +647,11 @@ def version( """ Returns the current data version of the stream. - Version returns the current data version of the stream. This is not - cached, it queries each time. Take care that you do not intorduce races - in your code by assuming this function will always return the same vaue + .. warning:: + + Version returns the current data version of the stream. This is not + cached, it queries each time. Take care that you do not introduce races + in your code by assuming this function will always return the same value. Parameters ---------- @@ -577,6 +701,18 @@ def insert(self, data, merge="never"): ------- int The version of the stream after inserting new points. + + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> data = [(1500000000000000000, 1.0), (1500000000100000000, 2.0)] + >>> stream.insert(data) + 1234 + >>> stream.insert(data, merge="replace") + 1235 """ version = 0 i = 0 @@ -613,12 +749,44 @@ def arrow_insert(self, data: pa.Table, merge: str = "never") -> int: int The version of the stream after inserting new points. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + .. 
note:: + + This method is available for commercial customers with arrow-enabled servers. + + Examples + -------- + Assuming we have a sequence of ``times`` and ``values`` where ``times`` are in nanoseconds. + Insert the data as a pyarrow table, and if there are duplicate timestamps already in the database, + replace with the new ones in ``payload``. + + >>> conn = btrdb.connect() + >>> import pyarrow as pa + >>> for t, v in zip(times, vals): + ... print(t,v) + 1500000000000000000 1.0 + 1500000000100000000 2.0 + 1500000000200000000 3.0 + 1500000000300000000 4.0 + 1500000000400000000 5.0 + 1500000000500000000 6.0 + 1500000000600000000 7.0 + 1500000000700000000 8.0 + 1500000000800000000 9.0 + 1500000000900000000 10.0 + >>> schema = pa.schema( + ... [ + ... pa.field("time", pa.timestamp("ns", tz="UTC"), nullable=False), + ... pa.field("value", pa.float64(), nullable=False), + ... ] + ... ) + >>> payload = pa.Table.from_arrays([times, vals], schema=schema) + >>> version = stream.arrow_insert(payload, merge="replace") """ if not self._btrdb._ARROW_ENABLED: raise NotImplementedError(_arrow_not_impl_str.format("arrow_insert")) + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) chunksize = INSERT_BATCH_SIZE assert isinstance(data, pa.Table) tmp_table = data.rename_columns(["time", "value"]) @@ -652,6 +820,9 @@ def arrow_insert(self, data: pa.Table, merge: str = "never") -> int: return max(version) def _update_tags_collection(self, tags, collection): + """ + :meta private: + """ tags = self.tags() if tags is None else tags collection = self.collection if collection is None else collection if collection is None: @@ -667,6 +838,9 @@ def _update_tags_collection(self, tags, collection): ) def _update_annotations(self, annotations, encoder, replace): + """ + :meta private: + """ # make a copy of the annotations to prevent accidental mutable object mutation serialized = deepcopy(annotations) if encoder is not None: @@ -712,14 +886,10 @@ def update( None, they will remain 
unchanged in the database. To delete either tags or annotations, you must specify exactly which - keys and values you want set for the field and set `replace=True`. For - example: + keys and values you want set for the field and set ``replace=True``. - >>> annotations, _ = stream.anotations() - >>> del annotations["key_to_delete"] - >>> stream.update(annotations=annotations, replace=True) - This ensures that all of the keys and values for the annotations are + This ensures that all the keys and values for the annotations are preserved except for the key to be deleted. Parameters @@ -735,11 +905,11 @@ def update( Specify a new collection for the stream. If None, the collection will remain unchanged. encoder : json.JSONEncoder or None - JSON encoder class to use for annotation serialization. Set to None + JSON encoder class to use for annotation serialization. Set to ``None`` to prevent JSON encoding of the annotations. replace : bool, default: False Replace all annotations or tags with the specified dictionaries - instead of performing the normal upsert operation. Specifying True + instead of performing the normal upsert operation. Specifying ``True`` is the only way to remove annotation keys. auto_retry: bool, default: False Whether to retry this request in the event of an error @@ -758,6 +928,18 @@ def update( int The version of the metadata (separate from the version of the data) also known as the "property version". 
+ + + Examples + -------- + >>> annotations, _ = stream.annotations() + >>> del annotations["key_to_delete"] + >>> stream.update(annotations=annotations, replace=True) + 12345 + >>> annotations, _ = stream.annotations() + >>> "key_to_delete" in annotations + False + """ if tags is None and annotations is None and collection is None: raise BTRDBValueError( @@ -794,21 +976,25 @@ retry_backoff=4, ): """ - "Delete" all points between [`start`, `end`) + "Delete" all points between [``start``, ``end``) + + "Delete" all points between ``start`` (inclusive) and ``end`` (exclusive), + both in nanoseconds. + + .. note:: - "Delete" all points between `start` (inclusive) and `end` (exclusive), - both in nanoseconds. As BTrDB has persistent multiversioning, the - deleted points will still exist as part of an older version of the - stream. + As ``BTrDB`` has persistent multiversioning, the + deleted points will still exist as part of an older version of the + stream. Parameters ---------- start : int or datetime like object The start time in nanoseconds for the range to be deleted. (see - :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) end : int or datetime like object The end time in nanoseconds for the range to be deleted. 
(see - :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) auto_retry: bool, default: False Whether to retry this request in the event of an error retries: int, default: 5 @@ -825,6 +1011,19 @@ ------- int The version of the new stream created + + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream = conn.stream_from_uuid("...") + >>> start = 1500000000000000000 + >>> end = 1500000001000000000 + >>> stream.delete(start, end) + 1234 + >>> stream.count(start=start, end=end) + 0 """ return self._btrdb.ep.deleteRange( self._uuid, to_nanoseconds(start), to_nanoseconds(end) @@ -877,13 +1076,13 @@ version (list(tuple(RawPoint,int))). - Notes - ----- - Note that the raw data points are the original values at the sensor's - native sampling rate (assuming the time series represents measurements - from a sensor). This is the lowest level of data with the finest time - granularity. In the tree data structure of BTrDB, this data is stored in - the vector nodes. + .. note:: + + Note that the raw data points are the original values at the sensor's + native sampling rate (assuming the time series represents measurements + from a sensor). This is the lowest level of data with the finest time + granularity. In the tree data structure of BTrDB, this data is stored in + the vector nodes. """ materialized = [] @@ -905,6 +1104,7 @@ retries=5, retry_delay=3, retry_backoff=4, + schema=None, ) -> pa.Table: """Read raw values from BTrDB between time [a, b) in nanoseconds. @@ -920,6 +1120,9 @@ :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) version: int, default: 0 The version of the stream to be queried + schema: pyarrow.Schema + Optional arrow schema the server will cast the returned data to before sending it over + the network. You can use this to change the timestamp format, column names or data sizes. 
auto_retry: bool, default: False Whether to retry this request in the event of an error retries: int, default: 5 @@ -939,34 +1142,42 @@ def arrow_values( A pyarrow table of the raw values with time and value columns. - Notes - ----- - Note that the raw data points are the original values at the sensor's - native sampling rate (assuming the time series represents measurements - from a sensor). This is the lowest level of data with the finest time - granularity. In the tree data structure of BTrDB, this data is stored in - the vector nodes. + .. note:: + + Note that the raw data points are the original values at the sensor's + native sampling rate (assuming the time series represents measurements + from a sensor). This is the lowest level of data with the finest time + granularity. In the tree data structure of BTrDB, this data is stored in + the vector nodes. + + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. + - This method is available for commercial customers with arrow-enabled servers. 
""" if not self._btrdb._ARROW_ENABLED: raise NotImplementedError(_arrow_not_impl_str.format("arrow_values")) + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) start = to_nanoseconds(start) end = to_nanoseconds(end) arrow_and_versions = self._btrdb.ep.arrowRawValues( - uu=self.uuid, start=start, end=end, version=version + uu=self.uuid, start=start, end=end, version=version, schema=schema ) tables = list(arrow_and_versions) if len(tables) > 0: tabs, ver = zip(*tables) return pa.concat_tables(tabs) else: - schema = pa.schema( - [ - pa.field("time", pa.timestamp("ns", tz="UTC"), nullable=False), - pa.field("value", pa.float64(), nullable=False), - ] - ) + if schema is None: + schema = pa.schema( + [ + pa.field("time", pa.timestamp("ns", tz="UTC"), nullable=False), + pa.field("value", pa.float64(), nullable=False), + ] + ) return pa.Table.from_arrays([pa.array([]), pa.array([])], schema=schema) @retry @@ -985,16 +1196,18 @@ def aligned_windows( Read statistical aggregates of windows of data from BTrDB. Query BTrDB for aggregates (or roll ups or windows) of the time series - with `version` between time `start` (inclusive) and `end` (exclusive) in + with `version` between time ``start`` (inclusive) and ``end`` (exclusive) in nanoseconds. Each point returned is a statistical aggregate of all the - raw data within a window of width 2**`pointwidth` nanoseconds. These + raw data within a window of width ``2**pointwidth`` nanoseconds. These statistical aggregates currently include the mean, minimum, and maximum of the data and the count of data points composing the window. - Note that `start` is inclusive, but `end` is exclusive. That is, results + .. note:: + + ``start`` is inclusive, but ``end`` is exclusive. That is, results will be returned for all windows that start in the interval [start, end). If end < start+2^pointwidth you will not get any results. If start and - end are not powers of two, the bottom pointwidth bits will be cleared. 
+ end are not powers of two, the bottom ``pointwidth`` bits will be cleared. Each window will contain statistical summaries of the window. Statistical points with count == 0 will be omitted. @@ -1029,10 +1242,12 @@ def aligned_windows( containing data tuples. Each data tuple contains a StatPoint and the stream version. - Notes - ----- - As the window-width is a power-of-two, it aligns with BTrDB internal - tree data structure and is faster to execute than `windows()`. + + .. note:: + + As the window-width is a power-of-two, it aligns with ``BTrDB`` internal + tree data structure and is faster to execute than ``windows()``. + """ materialized = [] start = to_nanoseconds(start) @@ -1051,7 +1266,6 @@ def arrow_aligned_windows( start: int, end: int, pointwidth: int, - sort_time: bool = False, version: int = 0, auto_retry=False, retries=5, @@ -1067,25 +1281,26 @@ def arrow_aligned_windows( statistical aggregates currently include the mean, minimum, and maximum of the data and the count of data points composing the window. - Note that `start` is inclusive, but `end` is exclusive. That is, results - will be returned for all windows that start in the interval [start, end). - If end < start+2^pointwidth you will not get any results. If start and - end are not powers of two, the bottom pointwidth bits will be cleared. - Each window will contain statistical summaries of the window. - Statistical points with count == 0 will be omitted. + + .. note:: + + ``start`` is inclusive, but ``end`` is exclusive. That is, results + will be returned for all windows that start in the interval [start, end). + If end < start+2^pointwidth you will not get any results. If start and + end are not powers of two, the bottom ``pointwidth`` bits will be cleared. + Each window will contain statistical summaries of the window. + Statistical points with ``count == 0`` will be omitted. 
Parameters ---------- start : int or datetime like object, required The start time in nanoseconds for the range to be queried. (see - :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) end : int or datetime like object, required The end time in nanoseconds for the range to be queried. (see - :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) pointwidth : int, required Specify the number of ns between data points (2**pointwidth) - sort_time : bool, default: False - Should the table be sorted on the 'time' column? version : int, default: 0 Version of the stream to query auto_retry: bool, default: False @@ -1105,17 +1320,22 @@ pyarrow.Table Returns a pyarrow table containing the windows of data. - Notes - ----- - As the window-width is a power-of-two, it aligns with BTrDB internal - tree data structure and is faster to execute than `windows()`. - This method is available for commercial customers with arrow-enabled servers. + .. note:: + + As the window-width is a power-of-two, it aligns with BTrDB internal + tree data structure and is faster to execute than `windows()`. + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. 
""" if not self._btrdb._ARROW_ENABLED: raise NotImplementedError( _arrow_not_impl_str.format("arrow_aligned_windows") ) + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) if IS_DEBUG: logger.debug(f"For stream - {self.uuid} - {self.name}") @@ -1128,10 +1348,9 @@ def arrow_aligned_windows( ) if len(tables) > 0: tabs, ver = zip(*tables) - if sort_time: - return pa.concat_tables(tabs).sort_by("time") - else: - return pa.concat_tables(tabs) + # assume that time column is the first column returned + time_col = tabs[0].column_names[0] + return pa.concat_tables(tabs).sort_by(time_col) else: schema = pa.schema( [ @@ -1193,21 +1412,21 @@ def windows( containing data tuples. Each data tuple contains a StatPoint and the stream version (tuple(tuple(StatPoint, int), ...)). - Notes - ----- - Windows returns arbitrary precision windows from BTrDB. It is slower - than AlignedWindows, but still significantly faster than RawValues. Each - returned window will be `width` nanoseconds long. `start` is inclusive, - but `end` is exclusive (e.g if end < start+width you will get no - results). That is, results will be returned for all windows that start - at a time less than the end timestamp. If (`end` - `start`) is not a - multiple of width, then end will be decreased to the greatest value less - than end such that (end - start) is a multiple of `width` (i.e., we set - end = start + width * floordiv(end - start, width). The `depth` - parameter previously available has been deprecated. The only valid value - for depth is now 0. - This method is available for commercial customers with arrow-enabled servers. + .. note:: + + ``windows`` returns arbitrary precision windows from BTrDB. It is slower + than ``aligned_windows``, but can be significantly faster than raw value queries (``values``). Each + returned window will be ``width`` nanoseconds long. ``start`` is inclusive, + but ``end`` is exclusive (e.g if ``end < start+width`` you will get no + results). 
That is, results will be returned for all windows that start + at a time less than the end timestamp. If (``end`` - ``start``) is not a + multiple of ``width``, then ``end`` will be decreased to the greatest value less + than ``end`` such that (``end`` - ``start``) is a multiple of ``width`` (i.e., we set + ``end = start + width * floordiv(end - start, width)``. The ``depth`` + parameter previously available has been deprecated. The only valid value + for ``depth`` is now ``0``. + """ materialized = [] start = to_nanoseconds(start) @@ -1225,7 +1444,6 @@ def arrow_windows( start: int, end: int, width: int, - sort_time: bool = False, version: int = 0, auto_retry=False, retries=5, @@ -1244,8 +1462,6 @@ def arrow_windows( :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) width : int, required The number of nanoseconds in each window. - sort_time : bool, default: False - Should the table be sorted on the 'time' column. version : int, default=0, optional The version of the stream to query. auto_retry: bool, default: False @@ -1265,23 +1481,29 @@ def arrow_windows( pyarrow.Table Returns a pyarrow Table containing windows of data. - Notes - ----- - Windows returns arbitrary precision windows from BTrDB. It is slower - than AlignedWindows, but still significantly faster than RawValues. Each - returned window will be `width` nanoseconds long. `start` is inclusive, - but `end` is exclusive (e.g if end < start+width you will get no - results). That is, results will be returned for all windows that start - at a time less than the end timestamp. If (`end` - `start`) is not a - multiple of width, then end will be decreased to the greatest value less - than end such that (end - start) is a multiple of `width` (i.e., we set - end = start + width * floordiv(end - start, width). The `depth` - parameter previously available has been deprecated. The only valid value - for depth is now 0. - This method is available for commercial customers with arrow-enabled servers. 
+ + .. note:: + + ``windows`` returns arbitrary precision windows from ``BTrDB``. It is slower + than ``aligned_windows``, but still significantly faster than RawValues. Each + returned window will be ``width`` nanoseconds long. ``start`` is inclusive, + but ``end`` is exclusive (e.g if end < start+width you will get no + results). That is, results will be returned for all windows that start + at a time less than the ``end`` timestamp. If (``end`` - ``start``) is not a + multiple of ``width``, then ``end`` will be decreased to the greatest value less + than ``end`` such that (``end`` - ``start``) is a multiple of `width` (i.e., we set + ``end = start + width * floordiv(end - start, width)``. The ``depth`` + parameter previously available has been deprecated. The only valid value + for ``depth`` is now 0. + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. """ if not self._btrdb._ARROW_ENABLED: raise NotImplementedError(_arrow_not_impl_str.format("arrow_windows")) + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) start = to_nanoseconds(start) end = to_nanoseconds(end) tables = list( @@ -1296,10 +1518,8 @@ def arrow_windows( ) if len(tables) > 0: tabs, ver = zip(*tables) - if sort_time: - return pa.concat_tables(tabs).sort_by("time") - else: - return pa.concat_tables(tabs) + time_col = tabs[0].column_names[0] + return pa.concat_tables(tabs).sort_by(time_col) else: schema = pa.schema( [ @@ -1327,17 +1547,17 @@ def nearest( """ Finds the closest point in the stream to a specified time. - Return the point nearest to the specified `time` in nanoseconds since - Epoch in the stream with `version` while specifying whether to search - forward or backward in time. If `backward` is false, the returned point - will be >= `time`. If backward is true, the returned point will be < - `time`. The version of the stream used to satisfy the query is returned. 
+ Return the point nearest to the specified ``time`` in nanoseconds since + Epoch in the stream with ``version`` while specifying whether to search + forward or backward in time. If ``backward`` is ``false``, the returned point + will be >= ``time``. If ``backward`` is ``true``, the returned point will be < + ``time``. The ``version`` of the ``stream`` used to satisfy the query is returned. Parameters ---------- time : int or datetime like object The time (in nanoseconds since Epoch) to search near (see - :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + :func:``btrdb.utils.timez.to_nanoseconds`` for valid input types) version : int Version of the stream to use in search backward : boolean @@ -1480,6 +1700,9 @@ def allow_window(self): return not bool(self.pointwidth or (self.width and self.depth == 0)) def _latest_versions(self): + """ + :meta private: + """ uuid_ver_tups = self._btrdb._executor.map( lambda s: (s.uuid, s.version()), self._streams ) @@ -1489,7 +1712,7 @@ def pin_versions(self, versions=None): """ Saves the stream versions that future materializations should use. If no pin is requested then the first materialization will automatically - pin the return versions. Versions can also be supplied through a dict + pin the return versions. Versions can also be supplied through a ``dict`` object with key:UUID, value:stream.version(). 
Parameters @@ -1502,6 +1725,10 @@ def pin_versions(self, versions=None): StreamSet Returns self + Examples + -------- + >>> version_map = {s.uuid: 0 for s in streamset} + >>> pinned_streamset = streamset.pin_versions(versions=version_map) """ if versions is not None: if not isinstance(versions, dict): @@ -1530,6 +1757,18 @@ def versions(self): dict A dict containing the stream UUID and version ints as key/values + + Examples + -------- + A pinned vs non-pinned streamset + + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> version_map = {s.uuid: 0 for s streamset} + >>> pinned_streamset = streamset.pin_versions(versions=version_map) + >>> pinned_streamset.versions() + {UUID('fa42f64a-a851-408f-aa7e-88a85b3d295c'): 0, UUID('18e5527a-ed13-424d-bb97-3e06a763609e'): 0} + >>> streamset.versions() + {UUID('fa42f64a-a851-408f-aa7e-88a85b3d295c'): 34532, UUID('18e5527a-ed13-424d-bb97-3e06a763609e'): 12345} """ return ( self._pinned_versions if self._pinned_versions else self._latest_versions() @@ -1544,10 +1783,6 @@ def count(self, precise: bool = False): all points in the streams. The count is modified by start and end filters or by pinning versions. - Note that this helper method sums the counts of all StatPoints returned - by ``aligned_windows``. Because of this the start and end timestamps - may be adjusted if they are not powers of 2. - Parameters ---------- precise : bool, default = False @@ -1557,6 +1792,27 @@ def count(self, precise: bool = False): ------- int The total number of points in all streams for the specified filters. + + .. note:: + + Note that this helper method sums the counts of all StatPoints returned + by ``aligned_windows``. Because of this the start and end timestamps + may be adjusted if they are not powers of 2. 
+ + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.count() + 2345 + >>> filtered_streamset = streamset.filter(start=1500000000000000000, end=1500000001000000000) + >>> filtered_streamset.count(precise=True) + 734 + >>> streamset.filter(start=1500000000000000000, end=1500000001000000000).count(precise=True) + 734 """ params = self._params_from_filters() start = params.get("start", MINIMUM_TIME) @@ -1588,6 +1844,15 @@ def earliest(self): tuple The earliest points of data found among all streams + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.earliest() + (, ) """ earliest = [] params = self._params_from_filters() @@ -1599,8 +1864,11 @@ def earliest(self): lambda s: s.nearest(start, version=versions.get(s.uuid, 0), backward=False), self._streams, ) - for point, _ in earliest_points_gen: - earliest.append(point) + for point in earliest_points_gen: + if point is not None: + earliest.append(point[0]) + else: + earliest.append(None) return tuple(earliest) @@ -1617,6 +1885,15 @@ def latest(self): tuple The latest points of data found among all streams + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.earliest() + (, ) """ latest = [] params = self._params_from_filters() @@ -1626,8 +1903,11 @@ def latest(self): lambda s: s.nearest(start, version=versions.get(s.uuid, 0), backward=True), self._streams, ) - for point, _ in latest_points_gen: - latest.append(point) + for point in latest_points_gen: + if point is not None: + 
latest.append(point[0]) + else: + latest.append(None) return tuple(latest) @@ -1641,6 +1921,16 @@ def current(self): ------- tuple The latest points of data found among all streams + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.current() + (, ) """ latest = [] params = self._params_from_filters() @@ -1654,12 +1944,15 @@ def current(self): ) versions = self.versions() - latest_points_gen = self._btrdb._executor.map( + current_points_gen = self._btrdb._executor.map( lambda s: (s.nearest(now, version=versions.get(s.uuid, 0), backward=True)), self._streams, ) - for point in latest_points_gen: - latest.append(point) + for point in current_points_gen: + if point is not None: + latest.append(point[0]) + else: + latest.append(None) return tuple(latest) @@ -1673,6 +1966,7 @@ def filter( tags=None, annotations=None, sampling_frequency=None, + schema=None, ): """ Provides a new StreamSet instance containing stored query parameters and @@ -1710,25 +2004,61 @@ def filter( key/value pairs for filtering streams based on annotations sampling_frequency : float The sampling frequency of the data streams in Hz, set this if you want timesnapped values. + schema: pyarrow.Schema + Optional arrow schema the server will cast the returned data to before sending it over + the network. You can use this to change the timestamp format, column names or data sizes. Returns ------- StreamSet a new instance cloned from the original with filters applied - Notes - ----- - If you set `sampling_frequency` to a non-zero value, the stream data returned will be aligned to a - grid of timestamps based on the period of the sampling frequency. For example, a sampling rate of 30hz will - have a sampling period of 1/30hz -> ~33_333_333 ns per sample. 
Leave sampling_frequency as None, or set to 0 to - prevent time alignment. You should **not** use aligned data for frequency-based analysis. + + .. note:: + + If you set ``sampling_frequency`` to a non-zero value, the stream data returned will be aligned to a + grid of timestamps based on the period of the sampling frequency. For example, a sampling rate of 30hz will + have a sampling period of 1/30hz -> ~33_333_333 ns per sample. Leave ``sampling_frequency`` as ``None``, or set to ``0`` to + prevent time alignment. You should **not** use aligned data for frequency-based analysis. + + + Examples + -------- + create a streamset and apply a few filters + + >>> streamset = btrdb.stream.StreamSet(list_of_streams) + >>> print(f"Total streams: {len(streamset)}") + Total streams: 89 + + >>> streamset.filter(units="Volts") + >>> print(f"Total streams: {len(streamset)}") + Total streams: 89 + + >>> filtered_streamset = streamset.filter(units="Volts") + >>> print(f"Total streams: {len(filtered_streamset)}") + Total streams: 23 + + >>> multiple_filters_streamset = (streamset.filter(unit="Volts") + >>> .filter(name="Sensor 1") + >>> .filter(annotations={"phase":"A"}) + >>> ) + >>> print(f"Total streams: {len(multiple_filters_streamset)}") + Total streams: 1 """ obj = self.clone() - if start is not None or end is not None or sampling_frequency is not None: + if ( + start is not None + or end is not None + or sampling_frequency is not None + or schema is not None + ): obj.filters.append( StreamFilter( - start=start, end=end, sampling_frequency=sampling_frequency + start=start, + end=end, + sampling_frequency=sampling_frequency, + schema=schema, ) ) @@ -1839,19 +2169,35 @@ def windows(self, width, depth=0): Returns self - Notes - ----- - Windows returns arbitrary precision windows from BTrDB. It is slower - than aligned_windows, but still significantly faster than values. Each - returned window will be width nanoseconds long. start is inclusive, but - end is exclusive (e.g. 
if end < start+width you will get no results). - That is, results will be returned for all windows that start at a time - less than the end timestamp. If (end - start) is not a multiple of - width, then end will be decreased to the greatest value less than end - such that (end - start) is a multiple of width (i.e., we set end = start - + width * floordiv(end - start, width)). The `depth` parameter previously - available has been deprecated. The only valid value for depth is now 0. + .. note:: + + ``windows`` returns arbitrary precision windows from BTrDB. It is slower + than ``aligned_windows``, but can be significantly faster than values. + Each returned window will be ``width`` nanoseconds long. + ``start`` is inclusive, but ``end`` is exclusive ( ``[start, end) ) + (e.g. if end < start+width you will get no results). + That is, results will be returned for all windows that start at a time + less than the end timestamp. If (``end`` - ``start``) is not a multiple of + ``width``, then ``end`` will be decreased to the greatest value less than ``end`` + such that (end - start) is a multiple of width (i.e., we set end = start + + width * floordiv(end - start, width)). The ``depth`` parameter previously + available has been deprecated. The only valid value for ``depth`` is now ``0``. + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.windows(width=1000000000) + + >>> streamset.windows(width=1000000000, depth=0) + + >>> streamset.aligned_windows(pointwidth=30) + Traceback (most recent call last): + ... 
+ btrdb.exceptions.InvalidOperation: A window operation is already requested """ if not self.allow_window: raise InvalidOperation("A window operation is already requested") @@ -1880,18 +2226,32 @@ def aligned_windows(self, pointwidth): StreamSet Returns self - Notes - ----- - `aligned_windows` reads power-of-two aligned windows from BTrDB. It is - faster than Windows(). Each returned window will be 2^pointwidth - nanoseconds long, starting at start. Note that start is inclusive, but - end is exclusive. That is, results will be returned for all windows that - start in the interval [start, end). If end < start+2^pointwidth you will - not get any results. If start and end are not powers of two, the bottom - pointwidth bits will be cleared. Each window will contain statistical - summaries of the window. Statistical points with count == 0 will be - omitted. + .. note:: + + ``aligned_windows`` reads power-of-two aligned windows from BTrDB. It is + faster than ``windows()``. Each returned window will be 2^``pointwidth`` + nanoseconds long, beginning at ``start``. Note that start is inclusive, but + end is exclusive. That is, results will be returned for all windows that + start in the interval ``[start, end)``. If ``end`` < ``start``+2^``pointwidth`` you will + not get any results. If ``start`` and ``end`` are not powers of two, the bottom + ``pointwidth`` bits will be cleared. Each window will contain statistical + summaries of the window. Statistical points with ``count`` == 0 will be + omitted. + + Examples + -------- + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> streamset.aligned_windows(pointwidth=30) + + >>> streamset.windows(width=1000000000) + Traceback (most recent call last): + ... 
+ btrdb.exceptions.InvalidOperation: A window operation is already requested """ if not self.allow_window: raise InvalidOperation("A window operation is already requested") @@ -1908,6 +2268,8 @@ def _streamset_data(self, as_iterators=False): ---------- as_iterators : bool Returns each single stream's data as an iterator. Defaults to False. + + :meta private: """ params = self._params_from_filters() # sampling freq not supported for non-arrow streamset ops @@ -1966,6 +2328,20 @@ def rows(self): A list of tuples containing a RawPoint (or StatPoint) and the stream version (list(tuple(RawPoint, int))). + Examples + -------- + >>> for row in streams.rows(): + >>> print(row) + (None, RawPoint(1500000000000000000, 1.0), RawPoint(1500000000000000000, 1.0), RawPoint(1500000000000000000, 1.0)) + (RawPoint(1500000000100000000, 2.0), None, RawPoint(1500000000100000000, 2.0), RawPoint(1500000000100000000, 2.0)) + (None, RawPoint(1500000000200000000, 3.0), None, RawPoint(1500000000200000000, 3.0)) + (RawPoint(1500000000300000000, 4.0), None, RawPoint(1500000000300000000, 4.0), RawPoint(1500000000300000000, 4.0)) + (None, RawPoint(1500000000400000000, 5.0), RawPoint(1500000000400000000, 5.0), RawPoint(1500000000400000000, 5.0)) + (RawPoint(1500000000500000000, 6.0), None, None, RawPoint(1500000000500000000, 6.0)) + (None, RawPoint(1500000000600000000, 7.0), RawPoint(1500000000600000000, 7.0), RawPoint(1500000000600000000, 7.0)) + (RawPoint(1500000000700000000, 8.0), None, RawPoint(1500000000700000000, 8.0), RawPoint(1500000000700000000, 8.0)) + (None, RawPoint(1500000000800000000, 9.0), RawPoint(1500000000800000000, 9.0), RawPoint(1500000000800000000, 9.0)) + (RawPoint(1500000000900000000, 10.0), None, RawPoint(1500000000900000000, 10.0), RawPoint(1500000000900000000, 10.0)) """ result = [] streamset_data = self._streamset_data(as_iterators=True) @@ -2015,14 +2391,34 @@ def insert(self, data_map: dict, merge: str = "never") -> dict: - 'retain': if two points have the same 
timestamp, the old one is kept - 'replace': if two points have the same timestamp, the new one is kept - Notes - ----- - You MUST convert your datetimes into utc+0 yourself. BTrDB expects utc+0 datetimes. Returns ------- dict[uuid, int] The versions of the stream after inserting new points. + + + .. note:: + + You MUST convert your datetimes into UTC+0 **yourself**. BTrDB expects UTC+0 datetimes. + + + Examples + -------- + >>> import pandas as pd + >>> import btrdb + >>> conn = btrdb.connect() + >>> stream1 = conn.stream_from_uuid("...") + >>> stream2 = conn.stream_from_uuid("...") + >>> streamset = btrdb.stream.StreamSet([stream1, stream2]) + >>> data_map = { + ... stream1.uuid: pd.DataFrame({'time': [1500000000000000000, 1500000000100000000], 'value': [1.0, 2.0]}), + ... stream2.uuid: pd.DataFrame({'time': [1500000000000000000, 1500000000100000000], 'value': [3.0, 4.0]}) + ... } + >>> streamset.insert(data_map) + {UUID('...'): 1234, UUID('...'): 5678} + >>> streamset.insert(data_map, merge='replace') + {UUID('...'): 1235, UUID('...'): 5679} """ filtered_data_map = {s.uuid: data_map[s.uuid] for s in self._streams} for key, dat in filtered_data_map.items(): @@ -2083,6 +2479,9 @@ def arrow_insert(self, data_map: dict, merge: str = "never") -> dict: return versions def _params_from_filters(self): + """ + :meta private: + """ params = {} for filter in self.filters: if filter.start is not None: @@ -2091,11 +2490,15 @@ def _params_from_filters(self): params["end"] = filter.end if filter.sampling_frequency is not None: params["sampling_frequency"] = filter.sampling_frequency + if filter.schema is not None: + params["schema"] = filter.schema return params def values_iter(self): """ Must return context object which would then close server cursor on __exit__ + + :meta private: """ raise NotImplementedError() @@ -2116,10 +2519,12 @@ def arrow_values( This data will be sorted by the 'time' column. 
- Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + .. note:: + + This method is available for commercial customers with arrow-enabled servers. """ + if pa is None: + raise ImportError(_ARROW_IMPORT_MSG) params = self._params_from_filters() versions = self._pinned_versions if versions is None: @@ -2129,6 +2534,10 @@ def arrow_values( _ = params.pop("sampling_frequency", None) if self.pointwidth is not None: + if params.pop("schema", None) is not None: + raise NotImplementedError( + "aligned windows queries do not yet support an arrow schema" + ) # create list of stream.aligned_windows data params.update({"pointwidth": self.pointwidth}) _ = params.pop("sampling_frequency", None) @@ -2144,27 +2553,18 @@ def arrow_values( ) stream_uus = [str(s.uuid) for s in self._streams] data = list(aligned_windows_gen) - tablex = data.pop(0) - uu = stream_uus.pop(0) - tab_columns = [ - c if c == "time" else uu + "/" + c for c in tablex.column_names - ] - tablex = tablex.rename_columns(tab_columns) - if data: - for tab, uu in zip(data, stream_uus): - tab_columns = [ - c if c == "time" else uu + "/" + c for c in tab.column_names - ] - tab = tab.rename_columns(tab_columns) - tablex = tablex.join(tab, "time", join_type="full outer") - data = tablex - else: - data = tablex - data = data.sort_by("time") + table_joined = _merge_pyarrow_tables( + {uu: tab for uu, tab in zip(stream_uus, data)} + ) + data = table_joined elif self.width is not None and self.depth is not None: # create list of stream.windows data (the windows method should # prevent the possibility that only one of these is None) + if params.pop("schema", None) is not None: + raise NotImplementedError( + "windows queries do not yet support an arrow schema" + ) _ = params.pop("sampling_frequency", None) params.update({"width": self.width}) windows_gen = self._btrdb._executor.map( @@ -2175,23 +2575,10 @@ def arrow_values( ) stream_uus = [str(s.uuid) for s in self._streams] data = 
list(windows_gen) - tablex = data.pop(0) - uu = stream_uus.pop(0) - tab_columns = [ - c if c == "time" else uu + "/" + c for c in tablex.column_names - ] - tablex = tablex.rename_columns(tab_columns) - if data: - for tab, uu in zip(data, stream_uus): - tab_columns = [ - c if c == "time" else uu + "/" + c for c in tab.column_names - ] - tab = tab.rename_columns(tab_columns) - tablex = tablex.join(tab, "time", join_type="full outer") - data = tablex - else: - data = tablex - data = data.sort_by("time") + table_joined = _merge_pyarrow_tables( + {uu: tab for uu, tab in zip(stream_uus, data)} + ) + data = table_joined else: sampling_freq = params.pop("sampling_frequency", 0) period_ns = 0 @@ -2204,17 +2591,20 @@ def arrow_values( if len(table) > 0: data = pa.concat_tables(table) else: - schema = pa.schema( - [pa.field("time", pa.timestamp("ns", tz="UTC"), nullable=False)] - + [ - pa.field(str(s.uuid), pa.float64(), nullable=False) - for s in self._streams - ], - ) + schema = params.pop("schema", None) + if schema is None: + schema = pa.schema( + [pa.field("time", pa.timestamp("ns", tz="UTC"), nullable=False)] + + [ + pa.field(str(s.uuid), pa.float64(), nullable=False) + for s in self._streams + ], + ) data = pa.Table.from_arrays( [pa.array([]) for i in range(1 + len(self._streams))], schema=schema ) - return data + time_col = data.column_names[0] + return data.sort_by(time_col) def __repr__(self): token = "stream" if len(self) == 1 else "streams" @@ -2270,10 +2660,15 @@ class StreamFilter(object): """ def __init__( - self, start: int = None, end: int = None, sampling_frequency: int = None + self, + start: int = None, + end: int = None, + sampling_frequency: int = None, + schema=None, ): self.start = to_nanoseconds(start) if start else None self.end = to_nanoseconds(end) if end else None + self.schema = schema self.sampling_frequency = sampling_frequency if sampling_frequency else None if self.start is None and self.end is None: @@ -2300,3 +2695,77 @@ def 
_coalesce_table_deque(tables: deque): t2, "time", join_type="full outer", right_suffix=f"_{idx}" ) return main_table + + +def _extract_unique_times(stream_map: Dict[uuid.UUID, pa.Table]) -> pa.Array: + """Extracts and returns unique 'time' values from all tables.""" + time_arrays = [ + table.column("time").combine_chunks() + for table in stream_map.values() + if table.num_rows > 0 + ] + if not time_arrays: # Check if the list is empty + return pa.array([], type=pa.timestamp("ns")) + + all_times = pa.concat_arrays(time_arrays) + return pc.unique(all_times).sort() + + +def _build_combined_schema( + stream_map: Dict[uuid.UUID, pa.Table], unique_times: pa.Array +) -> pa.Schema: + """Constructs a combined schema for the merged table, ensuring unique column names.""" + combined_schema = [("time", unique_times.type)] + # Use a list comprehension to flatten the loop into a single iterable over all columns + combined_schema += [ + pa.field( + f"{uu}/{col_name}", + table.column(col_name).type if table.num_rows > 0 else pa.null(), + ) + for uu, table in stream_map.items() + for col_name in table.column_names + if col_name != "time" + ] + + return pa.schema(combined_schema) + + +def _merge_pyarrow_tables(stream_map: Dict[uuid.UUID, pa.Table]) -> pa.Table: + """Merges PyArrow tables based on 'time' values into a single table.""" + unique_times = _extract_unique_times(stream_map) + combined_schema = _build_combined_schema(stream_map, unique_times) + + none_data = [None] * len(unique_times) + preallocated_data = { + field.name: pa.array(none_data, type=field.type) for field in combined_schema + } + preallocated_data["time"] = unique_times + + for uu, table in stream_map.items(): + if table.num_rows > 0: + time_indices = pc.index_in( + preallocated_data["time"], + value_set=table.column("time"), + skip_nulls=True, + ) + for col_name in table.column_names: + if col_name == "time": + continue + combined_col_name = f"{str(uu)}/{col_name}" + preallocated_data[combined_col_name] = 
pc.take( + table.column(col_name), indices=time_indices + ) + else: + # For empty tables, ensure their columns are represented with all nulls + for col_name in combined_schema.names: + if ( + col_name.startswith(f"{str(uu)}/") + and col_name not in preallocated_data + ): + field_type = combined_schema.field(col_name).type + preallocated_data[col_name] = pa.array( + [None] * preallocated_data["time"].length(), type=field_type + ) + + arrays = [preallocated_data[col] for col in combined_schema.names] + return pa.Table.from_arrays(arrays=arrays, schema=combined_schema) diff --git a/btrdb/transformers.py b/btrdb/transformers.py index 9bcf7b5..dcdee4f 100644 --- a/btrdb/transformers.py +++ b/btrdb/transformers.py @@ -18,11 +18,34 @@ import contextlib import csv from collections import OrderedDict -from typing import Sequence -from warnings import warn - -import pandas as pd -import pyarrow +from typing import TYPE_CHECKING + +try: + import pyarrow as pa +except ImportError: + pa = None +try: + import polars as pl +except ImportError: + pl = None +try: + import pandas as pd +except ImportError: + pd = None +try: + import numpy as np +except ImportError: + np = None + +if TYPE_CHECKING: + import numpy as np + import pandas as pd + import polars as pl + import pyarrow as pa + +_IMPORT_ERR_MSG = ( + """Package(s) expected, but not found. Please pip install the following: {}""" +) ########################################################################## ## Helper Functions @@ -71,11 +94,14 @@ def to_series(streamset, datetime64_index=True, agg="mean", name_callable=None): Specify a callable that can be used to determine the series name given a Stream object. + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. 
+ """ - try: - import pandas as pd - except ImportError: - raise ImportError("Please install Pandas to use this transformation function.") + if pd is None: + raise ImportError(_IMPORT_ERR_MSG.format("pandas")) # TODO: allow this at some future point if agg == "all": @@ -97,13 +123,13 @@ def to_series(streamset, datetime64_index=True, agg="mean", name_callable=None): values.append(getattr(point, agg)) if datetime64_index: - times = pd.Index(times, dtype="datetime64[ns]") + times = pd.Index(times, dtype="datetime64[ns]", name="time") result.append(pd.Series(data=values, index=times, name=stream_names[idx])) return result -def arrow_to_series(streamset, agg="mean", name_callable=None): +def arrow_to_series(streamset, agg=None, name_callable=None): """ Returns a list of Pandas Series objects indexed by time @@ -118,14 +144,89 @@ def arrow_to_series(streamset, agg="mean", name_callable=None): Specify a callable that can be used to determine the series name given a Stream object. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + Returns + ------- + List[pandas.Series] + + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. + + + .. note:: + + If you are not performing a ``window`` or ``aligned_window`` query, the ``agg`` parameter will be ignored. + + Examples + -------- + Return a list of series of raw data per stream. 
+ + >>> conn = btrdb.connect() + >>> s1 = conn.stream_from_uuid('c9fd8735-5ec5-4141-9a51-d23e1b2dfa42') + >>> s2 = conn.stream_from_uuid('9173fa70-87ab-4ac8-ac08-4fd63b910cae' + >>> streamset = btrdb.stream.StreamSet([s1,s2]) + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001).arrow_to_series(agg=None) + [time + 2017-07-14 02:40:00+00:00 1.0 + 2017-07-14 02:40:00.100000+00:00 2.0 + 2017-07-14 02:40:00.200000+00:00 3.0 + 2017-07-14 02:40:00.300000+00:00 4.0 + 2017-07-14 02:40:00.400000+00:00 5.0 + 2017-07-14 02:40:00.500000+00:00 6.0 + 2017-07-14 02:40:00.600000+00:00 7.0 + 2017-07-14 02:40:00.700000+00:00 8.0 + 2017-07-14 02:40:00.800000+00:00 9.0 + 2017-07-14 02:40:00.900000+00:00 10.0 + Name: new/stream/collection/foo, dtype: double[pyarrow], + time + 2017-07-14 02:40:00+00:00 1.0 + 2017-07-14 02:40:00.100000+00:00 2.0 + 2017-07-14 02:40:00.200000+00:00 3.0 + 2017-07-14 02:40:00.300000+00:00 4.0 + 2017-07-14 02:40:00.400000+00:00 5.0 + 2017-07-14 02:40:00.500000+00:00 6.0 + 2017-07-14 02:40:00.600000+00:00 7.0 + 2017-07-14 02:40:00.700000+00:00 8.0 + 2017-07-14 02:40:00.800000+00:00 9.0 + 2017-07-14 02:40:00.900000+00:00 10.0 + Name: new/stream/bar, dtype: double[pyarrow]] + + + A window query of 0.5seconds long. + + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001) + ... .windows(width=int(0.5 * 10**9)) + ... .arrow_to_series(agg=["mean", "count"]) + [time + 2017-07-14 02:40:00+00:00 2.5 + 2017-07-14 02:40:00.400000+00:00 6.5 + Name: new/stream/collection/foo/mean, dtype: double[pyarrow], + ... + time + 2017-07-14 02:40:00+00:00 4 + 2017-07-14 02:40:00.400000+00:00 4 + Name: new/stream/collection/foo/count, dtype: uint64[pyarrow], + ... + time + 2017-07-14 02:40:00+00:00 2.5 + 2017-07-14 02:40:00.400000+00:00 6.5 + Name: new/stream/bar/mean, dtype: double[pyarrow], + ... 
+ time + 2017-07-14 02:40:00+00:00 4 + 2017-07-14 02:40:00.400000+00:00 4 + Name: new/stream/bar/count, dtype: uint64[pyarrow]] + + """ if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_series requires an arrow-enabled BTrDB server." ) + if pa is None or pd is None: + raise ImportError(_IMPORT_ERR_MSG.format(",".join(["pyarrow", "pandas"]))) if agg is None: agg = ["mean"] if not isinstance(agg, list): @@ -138,18 +239,13 @@ def arrow_to_series(streamset, agg="mean", name_callable=None): return [arrow_df[col] for col in arrow_df] -def arrow_to_dataframe( - streamset, columns=None, agg=None, name_callable=None -) -> pd.DataFrame: +def arrow_to_dataframe(streamset, agg=None, name_callable=None) -> pd.DataFrame: """ Returns a Pandas DataFrame object indexed by time and using the values of a stream for each column. Parameters ---------- - columns: sequence - column names to use for DataFrame. Deprecated and not compatible with name_callable. - agg : List[str], default: ["mean"] Specify the StatPoint fields (e.g. aggregating function) to create the dataframe from. Must be one or more of "min", "mean", "max", "count", "stddev", or "all". This @@ -159,29 +255,94 @@ def arrow_to_dataframe( Specify a callable that can be used to determine the series name given a Stream object. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. 
+ + + Examples + -------- + + >>> conn = btrdb.connect() + >>> s1 = conn.stream_from_uuid('c9fd8735-5ec5-4141-9a51-d23e1b2dfa42') + >>> s2 = conn.stream_from_uuid('9173fa70-87ab-4ac8-ac08-4fd63b910cae' + >>> streamset = btrdb.stream.StreamSet([s1,s2]) + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001).arrow_to_dataframe() + new/stream/collection/foo new/stream/bar + time + 2017-07-14 02:40:00+00:00 1.0 1.0 + 2017-07-14 02:40:00.100000+00:00 2.0 2.0 + 2017-07-14 02:40:00.200000+00:00 3.0 3.0 + 2017-07-14 02:40:00.300000+00:00 4.0 4.0 + 2017-07-14 02:40:00.400000+00:00 5.0 5.0 + 2017-07-14 02:40:00.500000+00:00 6.0 6.0 + 2017-07-14 02:40:00.600000+00:00 7.0 7.0 + 2017-07-14 02:40:00.700000+00:00 8.0 8.0 + 2017-07-14 02:40:00.800000+00:00 9.0 9.0 + 2017-07-14 02:40:00.900000+00:00 10.0 10.0 + + + Use the stream uuids as their column names instead, using a lambda function. + + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001) + ... .arrow_to_dataframe( + ... name_callable=lambda s: str(s.uuid) + ... ) + c9fd8735-5ec5-4141-9a51-d23e1b2dfa42 9173fa70-87ab-4ac8-ac08-4fd63b910cae + time + 2017-07-14 02:40:00+00:00 1.0 1.0 + 2017-07-14 02:40:00.100000+00:00 2.0 2.0 + 2017-07-14 02:40:00.200000+00:00 3.0 3.0 + 2017-07-14 02:40:00.300000+00:00 4.0 4.0 + 2017-07-14 02:40:00.400000+00:00 5.0 5.0 + 2017-07-14 02:40:00.500000+00:00 6.0 6.0 + 2017-07-14 02:40:00.600000+00:00 7.0 7.0 + 2017-07-14 02:40:00.700000+00:00 8.0 8.0 + 2017-07-14 02:40:00.800000+00:00 9.0 9.0 + 2017-07-14 02:40:00.900000+00:00 10.0 10.0 + + + A window query, with a window width of 0.4 seconds, and only showing the ``mean`` statpoint. + + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001) + ... .windows(width=int(0.4*10**9)) + ... 
.arrow_to_dataframe(agg=["mean"]) + new/stream/collection/foo/mean new/stream/bar/mean + time + 2017-07-14 02:40:00+00:00 2.5 2.5 + 2017-07-14 02:40:00.400000+00:00 6.5 6.5 + + + A window query, with a window width of 0.4 seconds, and only showing the ``mean`` and ``count`` statpoints. + + >>> streamset.filter(start=1500000000000000000, end=1500000000900000001) + ... .windows(width=int(0.4*10**9)) + ... .arrow_to_dataframe(agg=["mean", "count"]) + new/stream/collection/foo/mean new/stream/collection/foo/count new/stream/bar/mean new/stream/bar/count + time + 2017-07-14 02:40:00+00:00 2.5 4 2.5 4 + 2017-07-14 02:40:00.400000+00:00 6.5 4 6.5 4 """ + + def _rename(col_name, col_names_map): + if col_name == "time": + return col_name + col_name_parts = col_name.split("/") + if len(col_name_parts) == 1: + uuid = col_name_parts[0] + new_col_name = col_names_map[uuid] + elif len(col_name_parts) == 2: + uuid, _ = col_name_parts + new_col_name = col_name.replace(uuid, col_names_map[uuid]) + return new_col_name + if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_dataframe requires an arrow-enabled BTrDB server." ) - - try: - import pandas as pd - import pyarrow as pa - except ImportError as err: - raise ImportError( - f"Please install Pandas and pyarrow to use this transformation function. 
ErrorMessage: {err}" - ) - # deprecation warning added in v5.8 - if columns: - warn( - "the columns argument is deprecated and will be removed in a future release", - DeprecationWarning, - stacklevel=2, - ) + if pa is None or pd is None: + raise ImportError(_IMPORT_ERR_MSG.format(",".join(["pyarrow", "pandas"]))) if agg is None: agg = ["mean"] @@ -201,42 +362,40 @@ def arrow_to_dataframe( if not callable(name_callable): name_callable = lambda s: s.collection + "/" + s.name # format is: uuid/stat_type - tmp_table = streamset.arrow_values() + tmp = streamset.arrow_values() + # assume time col is the first column + time_col = tmp.column_names[0] + + tmp_df = tmp.to_pandas( + date_as_object=False, + types_mapper=pd.ArrowDtype, + split_blocks=True, + self_destruct=True, + ).set_index(time_col) + tmp_df.index.name = time_col col_names = _stream_names(streamset, name_callable) col_names_map = {str(s.uuid): c for s, c in zip(streamset, col_names)} - updated_table_columns = [] - for old_col in tmp_table.column_names: - if old_col == "time": - updated_table_columns.append("time") - else: - for uu, new_name in col_names_map.items(): - if uu in old_col: - updated_table_columns.append(old_col.replace(uu, new_name)) - else: - continue - tmp_table = tmp_table.rename_columns(updated_table_columns) + tmp_df.rename( + columns=lambda col_name: _rename(col_name, col_names_map), inplace=True + ) if not streamset.allow_window: usable_cols = [] - for column_str in tmp_table.column_names: + for column_str in tmp_df.columns: for agg_name in agg: if agg_name in column_str: usable_cols.append(column_str) - tmp = tmp_table.select(["time", *usable_cols]) - else: - tmp = tmp_table - return tmp.to_pandas(date_as_object=False, types_mapper=pd.ArrowDtype) + tmp_df = tmp_df.loc[:, usable_cols] + + return tmp_df -def to_dataframe(streamset, columns=None, agg="mean", name_callable=None): +def to_dataframe(streamset, agg="mean", name_callable=None): """ Returns a Pandas DataFrame object indexed by 
time and using the values of a stream for each column. Parameters ---------- - columns: sequence - column names to use for DataFrame. Deprecated and not compatible with name_callable. - agg : str, default: "mean" Specify the StatPoint field (e.g. aggregating function) to create the Series from. Must be one of "min", "mean", "max", "count", "stddev", or "all". This @@ -247,19 +406,13 @@ def to_dataframe(streamset, columns=None, agg="mean", name_callable=None): Stream object. This is not compatible with agg == "all" at this time + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. + """ - try: - import pandas as pd - except ImportError: - raise ImportError("Please install Pandas to use this transformation function.") - - # deprecation warning added in v5.8 - if columns: - warn( - "the columns argument is deprecated and will be removed in a future release", - DeprecationWarning, - stacklevel=2, - ) + if pd is None: + raise ImportError(_IMPORT_ERR_MSG.format("pandas")) # TODO: allow this at some future point if agg == "all" and name_callable is not None: @@ -279,6 +432,7 @@ def to_dataframe(streamset, columns=None, agg="mean", name_callable=None): if not df.empty: df = df.set_index("time") + df.index.name = "time" if agg == "all" and not streamset.allow_window: stream_names = [ @@ -288,7 +442,7 @@ def to_dataframe(streamset, columns=None, agg="mean", name_callable=None): ] df.columns = pd.MultiIndex.from_tuples(stream_names) else: - df.columns = columns if columns else _stream_names(streamset, name_callable) + df.columns = _stream_names(streamset, name_callable) return df @@ -309,18 +463,20 @@ def arrow_to_polars(streamset, agg=None, name_callable=None): Specify a callable that can be used to determine the series name given a Stream object. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + .. 
note:: + + This method is available for commercial customers with arrow-enabled servers. + """ if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_polars requires an arrow-enabled BTrDB server." ) - try: - import polars as pl - except ImportError: - raise ImportError("Please install polars to use this transformation function.") + if pa is None or pd is None or pl is None: + raise ImportError( + _IMPORT_ERR_MSG.format(",".join(["pyarrow", "pandas", "polars"])) + ) if agg is None: agg = ["mean"] if not isinstance(agg, list): @@ -336,14 +492,18 @@ def arrow_to_polars(streamset, agg=None, name_callable=None): def arrow_to_arrow_table(streamset): """Return a pyarrow table of data. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. + """ if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_arrow_table requires an arrow-enabled BTrDB server." ) + if pa is None: + raise ImportError(_IMPORT_ERR_MSG.format("pyarrow")) return streamset.arrow_values() @@ -362,11 +522,16 @@ def to_polars(streamset, agg="mean", name_callable=None): name_callable : lambda, default: lambda s: s.collection + "/" + s.name Specify a callable that can be used to determine the series name given a Stream object. This is not compatible with agg == "all" at this time + + + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. 
+ """ - try: - import polars as pl - except ImportError: - raise ImportError("Please install polars to use this transformation function.") + if pl is None or pd is None: + raise ImportError(_IMPORT_ERR_MSG.format(",".join(["polars", "pandas"]))) # TODO: allow this at some future point if agg == "all" and name_callable is not None: @@ -392,7 +557,7 @@ def to_polars(streamset, agg="mean", name_callable=None): else: df = df.set_index("time") - df.index = pd.DatetimeIndex(df.index, tz="UTC") + df.index = pd.DatetimeIndex(df.index, tz="UTC", name="time") if agg == "all" and streamset.allow_window: stream_names = [ [s.collection, s.name, prop] @@ -403,7 +568,7 @@ def to_polars(streamset, agg="mean", name_callable=None): else: df.columns = _stream_names(streamset, name_callable) - return pl.from_pandas(df.reset_index(), nan_to_null=False) + return pl.from_pandas(df, nan_to_null=False, include_index=True) def to_array(streamset, agg="mean"): @@ -418,11 +583,14 @@ def to_array(streamset, agg="mean"): arrays. Must be one of "min", "mean", "max", "count", or "stddev". This argument is ignored if RawPoint values are passed into the function. + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. + """ - try: - import numpy as np - except ImportError: - raise ImportError("Please install Numpy to use this transformation function.") + if np is None: + raise ImportError(_IMPORT_ERR_MSG.format("numpy")) # TODO: allow this at some future point if agg == "all": @@ -450,16 +618,25 @@ def arrow_to_numpy(streamset, agg=None): arrays. Must be one or more of "min", "mean", "max", "count", or "stddev". This argument is ignored if RawPoint values are passed into the function. - Notes - ----- - This method first converts to a pandas data frame then to a numpy array. - This method is available for commercial customers with arrow-enabled servers. + .. 
note:: + + This method first converts to a pandas data frame then to a numpy array. + + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. + """ if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_numpy requires an arrow-enabled BTrDB server." ) + if np is None or pa is None or pd is None: + raise ImportError( + _IMPORT_ERR_MSG.format(",".join(["numpy", "pyarrow", "pandas"])) + ) arrow_df = arrow_to_dataframe(streamset=streamset, agg=agg, name_callable=None) return arrow_df.values @@ -480,6 +657,11 @@ def to_dict(streamset, agg="mean", name_callable=None): Specify a callable that can be used to determine the series name given a Stream object. + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. + """ if not callable(name_callable): name_callable = lambda s: s.collection + "/" + s.name @@ -524,14 +706,18 @@ def arrow_to_dict(streamset, agg=None, name_callable=None): Specify a callable that can be used to determine the series name given a Stream object. - Notes - ----- - This method is available for commercial customers with arrow-enabled servers. + + .. note:: + + This method is available for commercial customers with arrow-enabled servers. + """ if not streamset._btrdb._ARROW_ENABLED: raise NotImplementedError( "arrow_to_dict requires an arrow-enabled BTrDB server." ) + if pa is None or pd is None: + raise ImportError(_IMPORT_ERR_MSG.format(",".join(["pyarrow", "pandas"]))) if agg is None: agg = ["mean"] if not isinstance(agg, list): @@ -571,6 +757,12 @@ def to_csv( name_callable : lambda, default: lambda s: s.collection + "/" + s.name Specify a callable that can be used to determine the series name given a Stream object. + + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. 
+ """ # TODO: allow this at some future point @@ -620,6 +812,11 @@ def to_table(streamset, agg="mean", name_callable=None): Specify a callable that can be used to determine the column name given a Stream object. + + .. note:: + + This method does **not** use the ``arrow`` -accelerated endpoints for faster and more efficient data retrieval. + """ try: from tabulate import tabulate diff --git a/btrdb/utils/general.py b/btrdb/utils/general.py index 6344fe6..d438fce 100644 --- a/btrdb/utils/general.py +++ b/btrdb/utils/general.py @@ -12,12 +12,14 @@ """ General utilities for btrdb bindings """ +from datetime import timedelta + +from btrdb.utils.timez import to_nanoseconds + ########################################################################## ## Functions ########################################################################## - - def unpack_stream_descriptor(desc): """ Returns dicts for tags and annotations found in supplied stream @@ -74,6 +76,50 @@ def from_nanoseconds(cls, nsec): break return cls(pos) + def for_aligned_windows(self, start, end): + """ + Returns the aligned_windows's first and last timestamps, as well as the number of windows, + based on a given pointwidth set. + + Parameters + ---------- + start : int or datetime like object + The start time in nanoseconds for the range to be queried. (see + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + + end : int or datetime like object + The end time in nanoseconds for the range to be queried. (see + :func:`btrdb.utils.timez.to_nanoseconds` for valid input types) + + + Returns + ------- + aligned_start: int + First timestamp would be returned by `aligned_windows` that is inclusive of specified start + timestamp. + aligned_end: int + Last timestamp would be returned by `aligned_windows` that is inclusive of specified end + timestamp. + n_windows: int + The number of windows would be returned by `aligned_windows`. 
+ + Examples + -------- + Querying `aligned_windows` of pointwidth of 30, spanning 1 day (~24 hours). + + >>> start, end = "2016-03-01", "2016-03-02" + >>> pointwidth(30).for_aligned_windows(start, end) + (1456790399996657664, 1456876798632525824, 80466) + # output timestamp's `strftime` is: + # ['2016-02-29 23:59:59.996657664', '2016-03-01 23:59:58.632525824'] + + """ + start, end = to_nanoseconds(start), to_nanoseconds(end) + aligned_start = start - (start % self.nanoseconds) + n_windows = (end - aligned_start) // self.nanoseconds + aligned_end = aligned_start + (self.nanoseconds * (n_windows - 1)) + return aligned_start, aligned_end, n_windows + def __init__(self, p): self._pointwidth = int(p) @@ -123,6 +169,12 @@ def decr(self): def incr(self): return pointwidth(self + 1) + def to_timedelta(self): + """ + Returns the timedelta of the pointwidth. + """ + return timedelta(microseconds=self.microseconds) + def __int__(self): return self._pointwidth diff --git a/btrdb/utils/timez.py b/btrdb/utils/timez.py index 32cec9e..3c0428a 100644 --- a/btrdb/utils/timez.py +++ b/btrdb/utils/timez.py @@ -55,7 +55,7 @@ def currently_as_ns(): def ns_to_datetime(ns): """ - Converts nanoseconds to a datetime object (UTC) + Converts nanoseconds to a naive datetime object (UTC+0) Parameters ---------- @@ -178,7 +178,7 @@ def ns_delta( days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0, nanoseconds=0 ): """ - Similar to `timedelta`, ns_delta represents a span of time but as + Similar to ``timedelta``, ``ns_delta`` represents a span of time but as the total number of nanoseconds.
Parameters @@ -202,6 +202,14 @@ def ns_delta( ------- amount of time in nanoseconds : int + + Examples + -------- + 1 minute time delta should be 60 billion nanoseconds + + >>> deltaT = ns_delta(minutes=1) + >>> deltaT == 1 * 60 * 10**9 # 1 minute * 60 seconds * 1billion nanoseconds / second + True """ MICROSECOND = 1000 MILLISECOND = MICROSECOND * 1000 diff --git a/btrdb/version.py b/btrdb/version.py index d657ab4..7bf90da 100644 --- a/btrdb/version.py +++ b/btrdb/version.py @@ -15,7 +15,7 @@ ## Module Info ########################################################################## -__version_info__ = {"major": 5, "minor": 31, "micro": 0, "releaselevel": "final"} +__version_info__ = {"major": 5, "minor": 32, "micro": 0, "releaselevel": "final"} ########################################################################## ## Helper Functions diff --git a/docs/requirements.txt b/docs/requirements.txt index 49e71d8..2e6e1f1 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,8 @@ alabaster>=0.7.12 Sphinx>=1.7 +ipython sphinx-rtd-theme +sphinx-copybutton +sphinx-design numpydoc pydata-sphinx-theme diff --git a/docs/source/api/streams.rst b/docs/source/api/streams.rst index 7137d36..7707bbc 100644 --- a/docs/source/api/streams.rst +++ b/docs/source/api/streams.rst @@ -1,6 +1,7 @@ btrdb.stream ============== + .. _StreamGeneralDocs: .. automodule:: btrdb.stream @@ -8,5 +9,7 @@ btrdb.stream :members: .. _StreamSet API: -.. autoclass:: StreamSetBase +.. autoclass:: StreamSet + :show-inheritance: + :inherited-members: :members: diff --git a/docs/source/concepts.rst b/docs/source/concepts.rst index 4685556..2392034 100644 --- a/docs/source/concepts.rst +++ b/docs/source/concepts.rst @@ -11,7 +11,7 @@ of their behavior which will allow you to use them effectively. .. note:: - Data requests are fully materialized at this time. A future release will include the option to process data using generators to save on memory usage. 
+ Data requests are fully materialized at this time. A future release will include the option to process data using generators to save on memory usage. BTrDB Server @@ -38,48 +38,46 @@ value within the stream. .. code-block:: python - # view time and value of a single point in the stream + >>> # view time and value of a single point in the stream + >>> point.time + 1547241923338098176 - point.time - >>> 1547241923338098176 - - point.value - >>> 120.5 + >>> point.value + 120.5 StatPoint ^^^^^^^^^^^^ The StatPoint provides statistics about multiple points and gives -aggregation values such as `min`, `max`, `mean`, and `stddev` (standard deviation). +aggregation values such as :code:`min`, :code:`max`, :code:`mean`, :code:`count` and :code:`stddev` (standard deviation). This is most useful when you don't need to touch every individual value such as when you only need the count of the values over a range of time. These statistical queries execute in time proportional to the number of results, not the number of underlying points (i.e logarithmic time) and so you can attain valuable data in a fraction of the time when compared with retrieving -all of the individual values. Due to the internal data structures, BTrDB does +all of the individual values. Due to the internal data structures, :code:`BTrDB` does not need to read the underlying points to return these statistics! .. 
code-block:: python - # view aggregate values for points in a stream - - point.time - >>> 1547241923338098176 + >>> # view aggregate values for points in a stream + >>> point.time + 1547241923338098176 - point.min - >>> 42.1 + >>> point.min + 42.1 - point.mean - >>> 78.477 + >>> point.mean + 78.477 - point.max - >>> 122.4 + >>> point.max + 122.4 - point.count - >>> 18600 + >>> point.count + 18600 - point.stddev - >>> 3.4 + >>> point.stddev + 3.4 Tabular Data @@ -91,52 +89,55 @@ Refer to the :ref:`arrow enabled queries page ` and the :ref:`API do Streams ------------ -Streams represent a single series of time/value pairs. As such, the database +:code:`Stream` s represent a single series of time/value pairs. As such, the database can hold an almost unlimited amount of individual streams. Each stream has a :code:`collection` which is similar to a "path" or grouping for multiple streams. Each steam will also have a :code:`name` as well as a :code:`uuid` which is guaranteed to be unique across streams. -BTrDB data is versioned such that changes to a given stream (time series) will +:code:`BTrDB` data is versioned such that changes to a given stream (time series) will result in a new version for the stream. In this manner, you can pin your interactions to a specific version ensuring the values do not change over the course of your -interactions. If you want to work with the most recent version/data then -specify a version of zero (the default). +interactions. + +.. note:: + + If you want to work with the most recent version/data then specify a version of :code:`0` (the default). Each stream has a number of attributes and methods available and these are documented -within the API section of this publication. But the most common interactions -by users are to access the UUID, tags, annotations, version, and underlying data. +within the :ref:`API Reference ` section of this publication.
But the most common interactions +by users are to access the :code:`UUID`, :code:`tags`, :code:`annotations`, :code:`version`, and underlying data. -Each stream uses a UUID as its unique identifier which can also be used when querying -for streams. Metadata is provided by tags and annotations which are both provided -as dictionaries of data. Tags are used internally and have very specific keys -while annotations are more free-form and can be used by you to store your own +Each stream uses a :code:`UUID` as its unique identifier which can also be used when querying +for streams. Metadata is provided by :code:`tags` and :code:`annotations` which are both provided +as dictionaries of data. :code:`tags` are used internally and have very specific keys +while :code:`annotations` are more free-form and can be used by you to store your own metadata. .. code-block:: python - # retrieve stream's UUID - stream.uuid - >>> UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") + >>> # retrieve stream's UUID + >>> stream.uuid + UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") - # retrieve stream's current version - stream.version() - >>> 244 + >>> # retrieve stream's current version + >>> stream.version() + 244 - # retrieve stream tags - stream.tags() - >>> {'name': 'L1MAG', 'unit': 'volts', 'ingress': ''} + >>> # retrieve stream tags + >>> stream.tags() + {'name': 'L1MAG', 'unit': 'volts', 'ingress': ''} - # retrieve stream annotations - stream.annotations() - >>> {'poc': 'Salvatore McFesterson', 'region': 'northwest', 'state': 'WA'} + >>> # retrieve stream annotations + >>> stream.annotations() + ({'poc': 'Salvatore McFesterson', 'region': 'northwest', 'state': 'WA'}, 23) - # loop through points in the stream - for point, _ in stream.values(end=1547241923338098176, version=133): - print(point) - >>> RawPoint(1500000000100000000, 2.4) - >>> RawPoint(1500000000200000000, 2.8) - >>> RawPoint(1500000000300000000, 3.6) + >>> # loop through points in the stream + >>> for point, _ in 
stream.values(end=1547241923338098176, version=133): + >>> print(point) + RawPoint(1500000000100000000, 2.4) + RawPoint(1500000000200000000, 2.8) + RawPoint(1500000000300000000, 3.6) ... @@ -144,8 +145,8 @@ StreamSets ------------ Often you will want to query and work with multiple streams instead of just an -individual stream - StreamSets allow you to do this effectively. It is a light -wrapper around a list of Stream objects with convenience methods provided to +individual stream - :code:`StreamSets` allow you to do this effectively. It is a light +wrapper around a list of :code:`Stream` objects with convenience methods provided to help you work with multiple streams of data. As an example, you can filter the stream data with a single method call and then @@ -153,17 +154,45 @@ easily transform the data into other data types such as a pandas DataFrame or to disk as a CSV file. See the examples below for a quick sample and then visit our API docs to see the full list of features provided to you. +.. note:: + + :code:`StreamSet` methods that filter and operate on the :code:`StreamSet` object (like :code:`StreamSet.filter` ) return new copies of the :code:`StreamSet` itself rather than modifying in place. Similar to how most :code:`pandas.DataFrame` methods return a new :code:`DataFrame` object. This lets you compose multiple functions in a single call, which can improve readability, but can be tricky if you are not expecting this behavior. + +Lets explore a common use-case, filtering a streamset. + +.. 
code-block:: python + + >>> # create a streamset and apply a few filters + >>> streamset = btrdb.stream.StreamSet(list_of_streams) + >>> print(f"Total streams: {len(streamset)}") + Total streams: 89 + + >>> streamset.filter(units="Volts") + >>> print(f"Total streams: {len(streamset)}") + Total streams: 89 + + >>> filtered_streamset = streamset.filter(units="Volts") + >>> print(f"Total streams: {len(filtered_streamset)}") + Total streams: 23 + + >>> multiple_filters_streamset = (streamset.filter(unit="Volts") + >>> .filter(name="Sensor 1") + >>> .filter(annotations={"phase":"A"}) + >>> ) + >>> print(f"Total streams: {len(multiple_filters_streamset)}") + Total streams: 1 + .. code-block:: python - # establish database connection and query for streams by UUID - db = connect() - uuid_list = ["0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", ...] - streams = db.streams(*uuid_list) + >>> # establish database connection and query for streams by UUID + >>> db = connect() + >>> uuid_list = ["0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", ...] + >>> streams = db.streams(*uuid_list) - streams.filter(start=1500000000000000000).to_csv("data.csv") + >>> streams.filter(start=1500000000000000000).to_csv("data.csv") - streams.filter(start=1500000000000000000).to_dataframe() - >> time NW/stream0 NW/stream1 + >>> streams.filter(start=1500000000000000000).to_dataframe() + time NW/stream0 NW/stream1 0 1500000000000000000 NaN 1.0 1 1500000000100000000 2.0 NaN 2 1500000000200000000 NaN 3.0 diff --git a/docs/source/conf.py b/docs/source/conf.py index 1cc1ebf..5db17d4 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -27,7 +27,7 @@ # -- Project information ----------------------------------------------------- project = "btrdb" -copyright = "2023, Ping Things, Inc." +copyright = "2024, Ping Things, Inc." 
author = "PingThingsIO" # The short X.Y version @@ -48,10 +48,18 @@ extensions = [ "sphinx.ext.autodoc", "sphinx.ext.napoleon", + "sphinx.ext.autosummary", + # 'sphinx.ext.inheritance_diagram', + # 'sphinx.ext.intersphinx', + "sphinx.ext.ifconfig", + # 'IPython.sphinxext.ipython_console_highlighting', + # 'IPython.sphinxext.ipython_directive', "sphinx.ext.todo", "sphinx.ext.githubpages", - "sphinx.ext.intersphinx", + # "sphinx.ext.intersphinx", "numpydoc", + "sphinx_copybutton", + "sphinx_design", ] # Add any paths that contain templates here, relative to this directory. @@ -96,16 +104,19 @@ # documentation. # html_theme_options = { - "show_powered_by": False, - "github_user": "PingThingsIO", - "github_repo": "btrdb-python", - "travis_button": False, - "github_banner": False, - "show_related": False, - "note_bg": "#FFF59C", - "description": "A midweight library to converse with the BTrDB database.", - "extra_nav_links": {"btrdb": "http://btrdb-python.readthedocs.io"}, - "show_relbars": True, + # "show_powered_by": False, + # "github_user": "PingThingsIO", + # "github_repo": "btrdb-python", + # "travis_button": False, + # "github_banner": False, + # "show_related": False, + # "note_bg": "#FFF59C", + # "description": "A midweight library to converse with the BTrDB database.", + # "extra_nav_links": {"btrdb": "http://btrdb-python.readthedocs.io"}, + # "show_relbars": True, + "show_toc_level": 2, + "navigation_depth": 4, # Adjust the depth of the sidebar TOC + "show_nav_level": 2, # Initially shown levels of the TOC } # Add any paths that contain custom static files (such as style sheets) here, diff --git a/docs/source/explained.rst b/docs/source/explained.rst index 68bdfe7..d40eea7 100644 --- a/docs/source/explained.rst +++ b/docs/source/explained.rst @@ -5,7 +5,7 @@ BTrDB Explained **A next-gen timeseries database for dense, streaming telemetry.** -**Problem**: Existing timeseries databases are poorly equipped for a new generation of ultra-fafst sensor telemetry. 
Specifically, millions of high-precision power meters are to be deployed through the power grid to help analyze and prevent blackouts. Thus, new software must be built to facilitate the storage and analysis of its data. +**Problem**: Existing timeseries databases are poorly equipped for a new generation of ultra-fast sensor telemetry. Specifically, millions of high-precision power meters are to be deployed through the power grid to help analyze and prevent blackouts. Thus, new software must be built to facilitate the storage and analysis of its data. **Baseline**: We need 1.4M inserts/second and 5x that in reads if we are to support 1000 `micro-synchrophasors`_ per server node. No timeseries database can do this. diff --git a/docs/source/index.rst b/docs/source/index.rst index 3f1076b..5320d6f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -16,15 +16,6 @@ Welcome to btrdb docs! .. image:: https://img.shields.io/pypi/v/btrdb.svg :target: https://pypi.python.org/project/btrdb/ -.. note:: - - Starting with the 5.0 release, btrdb-python will be Python 3 only! This decision - was not made lightly but is necessary to keep compatibility with underlying - packages. - - In addition, this software is only compatible with version 5.x of the BTrDB - server. To communicate with a 4.x server, please install an earlier version - of this software. Welcome to btrdb-python's documentation. We provide Python access to the Berkeley Tree Database (BTrBD) along with some select convenience methods. If you are @@ -59,6 +50,9 @@ your appetite. 
for point, _ in stream.values(start, end): print(point.time, point.value) + # return the data as an arrow table instead + data = stream.arrow_values(start, end) + User Guide ---------- diff --git a/docs/source/installing.rst b/docs/source/installing.rst index 71cab8e..99a873b 100644 --- a/docs/source/installing.rst +++ b/docs/source/installing.rst @@ -1,13 +1,13 @@ Installing ======================== -The btrdb package has only a few requirements and is relatively easy to install. +The :code:`btrdb` package has only a few requirements and is relatively easy to install. A number of installation options are available as detailed below. Installing with pip ------------------- -We recommend using pip to install btrdb-python on all platforms: +We recommend using :code:`pip` to install :code:`btrdb-python` on all platforms: .. code-block:: bash @@ -24,7 +24,7 @@ We recommend installing the :code:`data` extra dependencies (the second option i $ pip install "btrdb[all]>=5.30.2" # btrdb with testing, data science and all other optional packages -To get a specific version of btrdb-python supply the version number. The major +To get a specific version of :code:`btrdb-python` supply the version number. The major version of this library is tied to the major version of the BTrDB database as in the 4.X bindings are best used to speak to a 4.X BTrDB database, the 5.X bindings for 5.X platform.. @@ -43,4 +43,4 @@ To upgrade using pip: Installing with Anaconda ------------------------ -We recommend installing using ``pip``. +We recommend installing using :code:`pip`. diff --git a/docs/source/quick-start.rst b/docs/source/quick-start.rst index 8901177..7e70ce8 100644 --- a/docs/source/quick-start.rst +++ b/docs/source/quick-start.rst @@ -9,22 +9,22 @@ Connecting to a server is easy with the supplied :code:`connect` function from t .. 
code-block:: python - import btrdb - - # connect without credentials - conn = btrdb.connect("192.168.1.101:4410") - - # connect using TLS - conn = btrdb.connect("192.168.1.101:4411") - - # connect with API key - conn = btrdb.connect("192.168.1.101:4411", apikey="123456789123456789") + >>> import btrdb + >>> # connect with API key + >>> conn = btrdb.connect("192.168.1.101:4411", apikey="123456789123456789") + >>> conn + Get Platform Information ^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python - conn.info() + >>> conn.info() + {'majorVersion': ..., + 'minorVersion': ..., + 'build': ..., + 'proxy': {...}} + Refer to :ref:`the connection API documentation page. ` @@ -32,8 +32,8 @@ Refer to :ref:`the connection API documentation page. ` Retrieving a Stream ---------------------- -In order to interact with data, you'll need to obtain or create a :code:`Stream` object. A -number of options are available to get existing streams. +In order to interact with data, you'll need to obtain or create a :code:`Stream` object. +A number of options are available to get existing streams. Find streams by collection ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -43,9 +43,9 @@ collection you can use the :code:`streams_in_collection` method. .. code-block:: python - streams = conn.streams_in_collection("USEAST_NOC1/90807") - for stream in streams: - print(stream.uuid, stream.name) + >>> streams = conn.streams_in_collection("USEAST_NOC1/90807") + >>> for stream in streams: + >>> print(stream.uuid, stream.name) Find stream by UUID ^^^^^^^^^^^^^^^^^^^^^ @@ -55,7 +55,7 @@ would like to retrieve. For convenience, this method accepts instances of either .. code-block:: python - stream = conn.stream_from_uuid("07d28a44-4991-492d-b9c5-2d8cec5aa6d4") + >>> stream = conn.stream_from_uuid("07d28a44-4991-492d-b9c5-2d8cec5aa6d4") @@ -69,42 +69,65 @@ if needed. .. 
code-block:: python - start = datetime(2018,1,1,12,30, tzinfo=timezone.utc) - start = start.timestamp() * 1e9 - end = start + (3600 * 1e9) + >>> start = datetime(2018,1,1,12,30, tzinfo=timezone.utc) + >>> start = start.timestamp() * 1e9 + >>> end = start + (3600 * 1e9) - for point, _ in stream.values(start, end): - print(point.time, point.value) + >>> for point, _ in stream.values(start, end): + >>> print(point.time, point.value) Some convenience functions are available to make it easier to deal with converting to nanoseconds. .. code-block:: python - from btrdb.utils.timez import to_nanoseconds, currently_as_ns + >>> from btrdb.utils.timez import to_nanoseconds, currently_as_ns - start = to_nanoseconds(datetime(2018,1,1, tzinfo=timezone.utc)) - end = currently_as_ns() + >>> start = to_nanoseconds(datetime(2018,1,1, tzinfo=timezone.utc)) + >>> end = currently_as_ns() - for point, _ in stream.values(start, end): - print(point.time, point.value) + >>> for point, _ in stream.values(start, end): + >>> print(point.time, point.value) You can also view windows of data at arbitrary levels of detail. One such windowing feature is shown below. .. code-block:: python - # query for windows of data 10,000 nanoseconds wide using a depth of zero - # which is accurate to the nanosecond. - params = { - "start": 1500000000000000000, - "end": 1500000000010000000, - "width": 2000000, - "depth": 0, - } - for window in stream.windows(**params): - for point, version in window: - print(point, version) + >>> # query for windows of data 10,000 nanoseconds wide using a depth of zero + >>> # which is accurate to the nanosecond. + >>> params = { + ... "start": 1500000000000000000, + ... "end": 1500000000010000000, + ... "width": 2000000, + ... "depth": 0, + ... 
} + >>> for window in stream.windows(**params): + >>> for point, version in window: + >>> print(point, version) + + +Return data as :code:`arrow` tables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Instead of returning data one :code:`RawPoint` at a time, which can be more computationally intensive, there is now the ability to return the data in a tabular format from the start, which can drastically save on run time as well as facilitate interoperability with many more data-science driven tools. +`Apache Arrow is a language agnostic columnar data schema `_ that has become a de facto standard for in-memory data analytics. +All data retrieval methods in :code:`BTrDB` now have corresponding :code:`arrow-` prepended methods that natively return :code:`pyarrow` data tables. + +.. code-block:: python + + >>> s.arrow_values(start=1500000000000000000, end=1500000002000000001).to_pandas() + time value + 0 2017-07-14 02:40:00+00:00 1.0 + 1 2017-07-14 02:40:00.100000+00:00 2.0 + 2 2017-07-14 02:40:00.200000+00:00 3.0 + 3 2017-07-14 02:40:00.300000+00:00 4.0 + 4 2017-07-14 02:40:00.400000+00:00 5.0 + 5 2017-07-14 02:40:00.500000+00:00 6.0 + 6 2017-07-14 02:40:00.600000+00:00 7.0 + 7 2017-07-14 02:40:00.700000+00:00 8.0 + 8 2017-07-14 02:40:00.800000+00:00 9.0 + 9 2017-07-14 02:40:00.900000+00:00 10.0 + Using StreamSets -------------------- @@ -118,33 +141,45 @@ level of the individual :code:`Stream` object. Aside from being useful to see concurrent data across streams, you can also easily transform the data to other data structures or even serialize the data to disk in one operation. -Some quick examples are shown below but please review the API docs for the full +Some quick examples are shown below but please review the :ref:`API docs ` for the full list of features. + +.. note:: + + In the following examples, notice that the end time is **not** inclusive of the data that is present at :code:`end` . :code:`start` is **inclusive** while :code:`end` is **exclusive**.
This is the case for **all** :code:`BTrDB` data query operations. + + .. math:: + [start, end) + + .. code-block:: python - streams = db.streams(*uuid_list) - - # serialize data to disk as CSV - streams.filter(start=1500000000000000000).to_csv("data.csv") - - # convert data to a pandas DataFrame - streams.filter(start=1500000000000000000).to_dataframe() - >> time NW/stream0 NW/stream1 - 0 1500000000000000000 NaN 1.0 - 1 1500000000100000000 2.0 NaN - 2 1500000000200000000 NaN 3.0 - 3 1500000000300000000 4.0 NaN - 4 1500000000400000000 NaN 5.0 - 5 1500000000500000000 6.0 NaN - 6 1500000000600000000 NaN 7.0 - 7 1500000000700000000 8.0 NaN - 8 1500000000800000000 NaN 9.0 - 9 1500000000900000000 10.0 NaN - - # materialize the streams' data - streams.filter(start=1500000000000000000).values() - >> [[RawPoint(1500000000100000000, 2.0), + >>> streams = db.streams(*uuid_list) + + >>> # serialize data to disk as CSV + >>> streams.filter(start=1500000000000000000, end=1500000000900000000).to_csv("data.csv") + + >>> # convert data to a pandas DataFrame + >>> streams.filter(start=1500000000000000000, end=1500000000900000000).to_dataframe() + nw/stream0 nw/stream1 + time + 1500000000000000000 nan 1.0 + 1500000000100000000 2.0 nan + 1500000000200000000 nan 3.0 + 1500000000300000000 4.0 nan + 1500000000400000000 nan 5.0 + 1500000000500000000 6.0 nan + 1500000000600000000 nan 7.0 + 1500000000700000000 8.0 nan + 1500000000800000000 nan 9.0 + + + + + >>> # materialize the streams' data + >>> streams.filter(start=1500000000000000000, end=1500000000900000000).values() + [[RawPoint(1500000000100000000, 2.0), RawPoint(1500000000300000000, 4.0), RawPoint(1500000000500000000, 6.0), RawPoint(1500000000700000000, 8.0), @@ -153,3 +188,48 @@ list of features. RawPoint(1500000000200000000, 3.0), RawPoint(1500000000400000000, 5.0), ... 
+ + + + + + +Return data as :code:`arrow` tables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:code:`StreamSets` are also able to return :code:`arrow` tables for the group of streams they represent. +This is especially convenient and is usually **much** faster than using the traditional :code:`RawPoint` -based data representation. +We recommend using the :code:`arrow` functions whenever possible. + +.. code-block:: python + + >>> # convert data to a pandas DataFrame, using pyarrow + >>> streams.filter(start=1500000000000000000, end=1500000000900000000) + ... .arrow_to_dataframe() + NW/stream0 NW/stream1 + time + 2017-07-14 02:40:00+00:00 NaN 1.0 + 2017-07-14 02:40:00.100000+00:00 2.0 NaN + 2017-07-14 02:40:00.200000+00:00 NaN 3.0 + 2017-07-14 02:40:00.300000+00:00 4.0 NaN + 2017-07-14 02:40:00.400000+00:00 NaN 5.0 + 2017-07-14 02:40:00.500000+00:00 6.0 NaN + 2017-07-14 02:40:00.600000+00:00 NaN 7.0 + 2017-07-14 02:40:00.700000+00:00 8.0 NaN + 2017-07-14 02:40:00.800000+00:00 NaN 9.0 + + + >>> # materialize the streams' data as an arrow table + >>> streams.filter(start=1500000000000000000, end=1500000000900000000).arrow_values() + pyarrow.Table + time: timestamp[ns, tz=UTC] not null + b29204f4-6c13-4ec7-a149-88e2ff950a72: double not null + 99a0d0b0-e24f-4875-b7d8-eae0036f2149: double not null + ---- + time: [ + ... [2017-07-14 02:40:00.000000000Z,2017-07-14 02:40:00.100000000Z, + ... 2017-07-14 02:40:00.200000000Z,2017-07-14 02:40:00.300000000Z, + ... 2017-07-14 02:40:00.400000000Z,2017-07-14 02:40:00.500000000Z, + ... 2017-07-14 02:40:00.600000000Z,2017-07-14 02:40:00.700000000Z, + ... 
2017-07-14 02:40:00.800000000Z]] + b29204f4-6c13-4ec7-a149-88e2ff950a72: [[nan,2,nan,4,nan,6,nan,8,nan]] + 99a0d0b0-e24f-4875-b7d8-eae0036f2149: [[1,nan,3,nan,5,nan,7,nan,9]] diff --git a/docs/source/working/arrow-enabled-queries.rst b/docs/source/working/arrow-enabled-queries.rst index 8a40a5e..fa531c9 100644 --- a/docs/source/working/arrow-enabled-queries.rst +++ b/docs/source/working/arrow-enabled-queries.rst @@ -24,9 +24,9 @@ To learn more about these methods, please refer to the :ref:`arrow_ prefixed met True Multistream Support ^^^^^^^^^^^^^^^^^^^^^^^^ -Until now, there has not been a true multistream query support, our previous api and with the new edits, emulates multistream support with :code:`StreamSet`s and using multithreading. +Until now, there has not been true multistream query support; our previous API, with the new edits, emulates multistream support with :code:`StreamSet` s using multithreading. However, this will still only scale to an amount of streams based on the amount of threads that the python threadpool logic can support. -Due to this, raw data queries for :code:`StreamSet`s using our arrow api :code:`StreamSet.filter(start=X, end=Y,).arrow_values()` will now perform true multistream queries. +Due to this, raw data queries for :code:`StreamSet` s using our arrow API :code:`StreamSet.filter(start=X, end=Y,).arrow_values()` will now perform true multistream queries. The platform, instead of the python client, will now quickly grab all the stream data for all streams in your streamset, and then package that back to the python client in an :code:`arrow` table! This leads to data fetch speedups on the order of 10-50x based on the amount and kind of streams.
diff --git a/docs/source/working/stream-manage-metadata.rst b/docs/source/working/stream-manage-metadata.rst index 75b7fd6..7d90655 100644 --- a/docs/source/working/stream-manage-metadata.rst +++ b/docs/source/working/stream-manage-metadata.rst @@ -102,7 +102,7 @@ that the data has already been changed by another user or process. annotations=annotations ) -If you would like to remove any keys from your annotations you must use the `replace=True` keyword argument. This will ensure that the annotations dictionary you provide completely replaces the existing values rather than perform an UPSERT operation. The example below shows how you could remove an existing key from the annotations dictionary. +If you would like to remove any keys from your annotations you must use the :code:`replace=True` keyword argument. This will ensure that the annotations dictionary you provide completely replaces the existing values rather than perform an UPSERT operation. The example below shows how you could remove an existing key from the annotations dictionary. .. code-block:: python diff --git a/docs/source/working/stream-query-manage.rst b/docs/source/working/stream-query-manage.rst index d0f83c6..9476381 100644 --- a/docs/source/working/stream-query-manage.rst +++ b/docs/source/working/stream-query-manage.rst @@ -75,15 +75,15 @@ detail. Querying Metadata ----------------- Finally, you can query for metadata using standard SQL although at the moment, only the -`streams` table is available. SQL queries can be submitted using the `query` -method which accepts both a `stmt` and `params` argument. The `stmt` should -contain the SQL you'd like executed with parameter placeholders such as `$1` or -`$2` as shown below. +:code:`streams` table is available. SQL queries can be submitted using the :code:`query` +method which accepts both a :code:`stmt` and :code:`params` argument. 
The :code:`stmt` should +contain the SQL you'd like executed with parameter placeholders such as :code:`$1` or +:code:`$2` as shown below. .. code-block:: python conn = btrdb.connect() - stmt = "select uuid from streams where name = $1 or name = $2" + stmt = "SELECT uuid FROM streams WHERE name = $1 OR name = $2" params = ["Boston_1", "Boston_2"] for row in conn.query(stmt, params): diff --git a/docs/source/working/stream-view-data.rst b/docs/source/working/stream-view-data.rst index 915e75e..b9c41c7 100644 --- a/docs/source/working/stream-view-data.rst +++ b/docs/source/working/stream-view-data.rst @@ -45,7 +45,7 @@ you query for all the data using the :code:`Stream.values` method due to the mem consumption implied). Each of these three methods returns a tuple containing a RawPoint and the data version number. The exact timestamp can be obtained from the RawPoint. Keep in mind that all of these methods accept a :code:`version` argument so that -you can ask for the earliest, latest, or nearest point from a previous version of the stream. +you can ask for the :code:`earliest`, :code:`latest`, or :code:`nearest` point from a previous version of the stream. .. code-block:: python @@ -86,7 +86,7 @@ aggregate of all the raw data within a window of width 2^pointwidth nanoseconds. Note that :code:`start` is inclusive, but :code:`end` is exclusive. That is, results -will be returned for all windows that start in the interval [start, end). +will be returned for all windows that start in the interval :math:`[start, end)`. If end < start+2^pointwidth you will not get any results. If start and end are not powers of two, the bottom pointwidth bits will be cleared. Each window will contain statistical summaries of the window. Statistical points @@ -150,37 +150,37 @@ of sample depths are provided below. 
+-------+-------------+---------------------------+-----------------+ As usual when querying data from BTrDB, the :code:`start` time is inclusive -while the :code:`end` time is exclusive. Note that if your last window spans +while the :code:`end` time is exclusive. **Note**: that if your last window spans across the end time then it will not be included in the results. .. code-block:: python - start = 1500000000000000000 - end = 1500000001000000000 - - # view underlying data for comparison - for point, _ in stream.values(start=start, end=end): - print(point) - >> RawPoint(1500000000000000000, 1.0) - >> RawPoint(1500000000100000000, 2.0) - >> RawPoint(1500000000200000000, 3.0) - >> RawPoint(1500000000300000000, 4.0) - >> RawPoint(1500000000400000000, 5.0) - >> RawPoint(1500000000500000000, 6.0) - >> RawPoint(1500000000600000000, 7.0) - >> RawPoint(1500000000700000000, 8.0) - >> RawPoint(1500000000800000000, 9.0) - >> RawPoint(1500000000900000000, 10.0) - - # each window spans 300 milleseconds - width = 300000000 - - # request a precision of roughly 1 millesecond - depth = 20 - - # view windowed data - for point, _ in stream.windows(start=start, end=end, - width=width, depth=depth): - >> StatPoint(1500000000000000000, 1.0, 2.0, 3.0, 3, 0.816496580927726) - >> StatPoint(1500000000300000000, 4.0, 5.0, 6.0, 3, 0.816496580927726) - >> StatPoint(1500000000600000000, 7.0, 8.0, 9.0, 3, 0.816496580927726) + >>> start = 1500000000000000000 + >>> end = 1500000001000000000 + + >>> # view underlying data for comparison + >>> for point, _ in stream.values(start=start, end=end): + >>> print(point) + RawPoint(1500000000000000000, 1.0) + RawPoint(1500000000100000000, 2.0) + RawPoint(1500000000200000000, 3.0) + RawPoint(1500000000300000000, 4.0) + RawPoint(1500000000400000000, 5.0) + RawPoint(1500000000500000000, 6.0) + RawPoint(1500000000600000000, 7.0) + RawPoint(1500000000700000000, 8.0) + RawPoint(1500000000800000000, 9.0) + RawPoint(1500000000900000000, 10.0) + + >>> # each 
window spans 300 milliseconds + >>> width = 300000000 + + >>> # request a precision of roughly 1 millisecond + >>> depth = 20 + + >>> # view windowed data + >>> for point, _ in stream.windows(start=start, end=end, + ... width=width, depth=depth): + StatPoint(1500000000000000000, 1.0, 2.0, 3.0, 3, 0.816496580927726) + StatPoint(1500000000300000000, 4.0, 5.0, 6.0, 3, 0.816496580927726) + StatPoint(1500000000600000000, 7.0, 8.0, 9.0, 3, 0.816496580927726) diff --git a/docs/source/working/streamsets.rst b/docs/source/working/streamsets.rst index 8a9d807..da6c1d0 100644 --- a/docs/source/working/streamsets.rst +++ b/docs/source/working/streamsets.rst @@ -38,8 +38,8 @@ Filtering To apply query parameters to your request, you should use the :code:`filter` method to supply a :code:`start` or :code:`end` argument. -Keep in mind that :code:`filter` will return a new object so you can keep -multiple filtered StreamSets in memory while you explore your data. The +Keep in mind that :code:`filter` will **return a new object so you can keep +multiple filtered StreamSets in memory while you explore your data**. The :code:`filter` method may be called multiple times but only the final values will be used when it is time to fulfill the request by the server. @@ -83,8 +83,8 @@ re.search to choose the streams to include. Retrieving Data ---------------- -There are two options available when you are ready to process the data from the -server. Both options are fully materialized but are organized in different ways +There are three options available when you are ready to process the data from the +server. All options are fully materialized but are organized in different ways according to what is more convenient for you. StreamSet.values() @@ -131,6 +131,10 @@ consists of 4 streams. >> RawPoint(1500000000900000000, 10.0)]] +.. attention:: + + + StreamSet.rows() ^^^^^^^^^^^^^^^^^^ By contrast, the :code:`rows` method aligns data by time rather than by stream. 
diff --git a/pyproject.toml b/pyproject.toml index 80f656d..a57516f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,12 @@ [project] name = "btrdb" -version = "5.31.0" +version = "5.32.0" authors = [ {name="PingThingsIO", email="support@pingthings.io"}, ] +maintainers = [ + {name="PingThingsIO", email="support@pingthings.io"}, +] description = "Bindings to interact with the Berkeley Tree Database using gRPC." readme = "README.md" license = {file="LICENSE.txt"} @@ -33,10 +36,6 @@ dependencies = [ "pytz", "pyyaml", "certifi", - "pyarrow", - "polars", - "numpy", - "pandas>=2.0", ] [project.optional-dependencies] @@ -65,8 +64,7 @@ all = [ ] [project.urls] -"Homepage" = "https://btrdb.io" -"Docs" = "https://btrdb.readthedocs.io" +"Docs" = "https://btrdb-python.readthedocs.io/" "Repository" = "https://github.com/pingthingsio/btrdb-python.git" [build-system] diff --git a/release.sh b/release.sh index 31f849e..b63f28c 100755 --- a/release.sh +++ b/release.sh @@ -35,14 +35,12 @@ fi echo "Setting version to v$1.$2.$3" -VERSION_CODE="__version_info__ = { 'major': $1, 'minor': $2, 'micro': $3, 'releaselevel': 'final'}" +VERSION_CODE="__version_info__ = {\"major\": $1, \"minor\": $2, \"micro\": $3, \"releaselevel\": \"final\"}" sed -i.bak "s/^__version_info__.*$/${VERSION_CODE}/g" btrdb/version.py sed -i.bak "s/^version.*$/version\ = \"$1.$2.$3\"/g" pyproject.toml git add btrdb/version.py git add pyproject.toml git commit -m "Release v$1.$2.$3" -git tag v$1.$2.$3 -git push origin v$1.$2.$3 -git push +echo "Now make a PR and merge it into main, then make tag :)" diff --git a/setup.cfg b/setup.cfg index 3f445b3..8d1166f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] -description-file = DESCRIPTION.md -license_file = LICENSE.txt +description_file = DESCRIPTION.md +license_files = LICENSE.txt [aliases] test=pytest diff --git a/setup.py b/setup.py index a5b82d1..df6ad76 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ REPOSITORY = 
"https://github.com/PingThingsIO/btrdb-python" PACKAGE = "btrdb" URL = "http://btrdb.io/" -DOCS_URL = "https://btrdb.readthedocs.io/en/latest/" +DOCS_URL = "https://btrdb-python.readthedocs.io/" ## Define the keywords KEYWORDS = ("btrdb", "berkeley", "timeseries", "database", "bindings" "gRPC") @@ -133,7 +133,6 @@ def get_description_type(path=PKG_DESCRIBE): "license": LICENSE, "author": AUTHOR, "author_email": EMAIL, - "url": URL, "maintainer": MAINTAINER, "maintainer_email": EMAIL, "project_urls": { diff --git a/tests/btrdb/test_base.py b/tests/btrdb/test_base.py index ac55c07..0fddc90 100644 --- a/tests/btrdb/test_base.py +++ b/tests/btrdb/test_base.py @@ -16,11 +16,11 @@ ########################################################################## import os -from unittest.mock import patch +from unittest.mock import MagicMock, Mock, patch import pytest -from btrdb import BTRDB_API_KEY, BTRDB_ENDPOINTS, __version__, connect +from btrdb import BTRDB_API_KEY, BTRDB_ENDPOINTS, Endpoint, __version__, connect from btrdb.exceptions import ConnectionError ########################################################################## @@ -81,21 +81,34 @@ def test_uses_env_over_profile( connect() mock_connect.assert_called_once_with(endpoints="c", apikey="d") + # @patch("btrdb.utils.credentials.credentials_by_env") + @patch("btrdb.utils.credentials.credentials_by_env") @patch("btrdb.utils.credentials.credentials_by_profile") @patch("btrdb.Connection") - def test_connect_with_env(self, mock_conn, mock_credentials_by_profile): + def test_connect_with_env( + self, mock_conn, mock_credentials_by_profile, mock_credentials_by_env + ): """ Assert connect uses ENV variables """ - mock_credentials_by_profile.return_value = {} - address = "127.0.0.1:4410" - os.environ[BTRDB_ENDPOINTS] = address + with patch( + "btrdb.endpoint.Endpoint.info", return_value=Mock(Endpoint) + ) as mock_ep_info: + mock_ep_info.return_value = MagicMock() + mock_credentials_by_profile.return_value = {} + address 
= "127.0.0.1:4410" + mock_credentials_by_env.return_value = {"endpoints": address} + os.environ[BTRDB_ENDPOINTS] = address - btrdb = connect() - mock_conn.assert_called_once_with(address, apikey=None) - mock_conn.reset_mock() + btrdb = connect() + mock_conn.assert_called_once_with(address, apikey=None) + mock_conn.reset_mock() - apikey = "abcd" - os.environ[BTRDB_API_KEY] = apikey - btrdb = connect() - mock_conn.assert_called_once_with(address, apikey=apikey) + apikey = "abcd" + os.environ[BTRDB_API_KEY] = apikey + mock_credentials_by_env.return_value = { + "endpoints": address, + "apikey": apikey, + } + btrdb = connect() + mock_conn.assert_called_once_with(address, apikey=apikey) diff --git a/tests/btrdb/test_conn.py b/tests/btrdb/test_conn.py index ed00aa9..3720ef2 100644 --- a/tests/btrdb/test_conn.py +++ b/tests/btrdb/test_conn.py @@ -16,7 +16,7 @@ ########################################################################## import uuid as uuidlib -from unittest.mock import Mock, PropertyMock, call, patch +from unittest.mock import MagicMock, Mock, PropertyMock, call, patch import pytest @@ -41,7 +41,7 @@ def stream1(): type(stream).name = PropertyMock(return_value="gala") stream.tags = Mock(return_value={"name": "gala", "unit": "volts"}) stream.annotations = Mock(return_value=({"owner": "ABC", "color": "red"}, 11)) - stream._btrdb = Mock() + stream._btrdb = MagicMock() return stream @@ -55,7 +55,7 @@ def stream2(): type(stream).name = PropertyMock(return_value="blood") stream.tags = Mock(return_value={"name": "blood", "unit": "amps"}) stream.annotations = Mock(return_value=({"owner": "ABC", "color": "orange"}, 22)) - stream._btrdb = Mock() + stream._btrdb = MagicMock() return stream @@ -69,7 +69,7 @@ def stream3(): type(stream).name = PropertyMock(return_value="yellow") stream.tags = Mock(return_value={"name": "yellow", "unit": "watts"}) stream.annotations = Mock(return_value=({"owner": "ABC", "color": "yellow"}, 33)) - stream._btrdb = Mock() + stream._btrdb = 
MagicMock() return stream @@ -98,110 +98,120 @@ class TestBTrDB(object): ########################################################################## ## .streams tests ########################################################################## + # TODO: remove this when removing future warnings from `streams_in_collection` + pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning") def test_streams_raises_err_if_version_not_list(self): """ Assert streams raises TypeError if versions is not list """ - db = BTrDB(None) - with pytest.raises(TypeError) as exc: - db.streams("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", versions="2,2") + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + with pytest.raises(TypeError) as exc: + db.streams("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", versions="2,2") - assert "versions argument must be of type list" in str(exc) + assert "versions argument must be of type list" in str(exc) def test_streams_raises_err_if_version_argument_mismatch(self): """ Assert streams raises ValueError if len(identifiers) doesnt match length of versions """ - db = BTrDB(None) - with pytest.raises(ValueError) as exc: - db.streams("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", versions=[2, 2]) + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + with pytest.raises(ValueError) as exc: + db.streams("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a", versions=[2, 2]) - assert "versions does not match identifiers" in str(exc) + assert "versions does not match identifiers" in str(exc) def test_streams_stores_versions(self): """ Assert streams correctly stores supplied version info """ - db = BTrDB(None) - uuid1 = uuidlib.UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") - uuid2 = uuidlib.UUID("17dbe387-89ea-42b6-864b-f505cdb483f5") - versions = [22, 44] - expected = dict(zip([uuid1, uuid2], versions)) + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + uuid1 = 
uuidlib.UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") + uuid2 = uuidlib.UUID("17dbe387-89ea-42b6-864b-f505cdb483f5") + versions = [22, 44] + expected = dict(zip([uuid1, uuid2], versions)) - streams = db.streams(uuid1, uuid2, versions=versions) - assert streams._pinned_versions == expected + streams = db.streams(uuid1, uuid2, versions=versions) + assert streams._pinned_versions == expected @patch("btrdb.conn.BTrDB.stream_from_uuid") def test_streams_recognizes_uuid(self, mock_func): """ Assert streams recognizes uuid strings """ - db = BTrDB(None) - uuid1 = uuidlib.UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") - mock_func.return_value = Stream(db, uuid1) - db.streams(uuid1) + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + uuid1 = uuidlib.UUID("0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") + mock_func.return_value = Stream(db, uuid1) + db.streams(uuid1) - mock_func.assert_called_once() - assert mock_func.call_args[0][0] == uuid1 + mock_func.assert_called_once() + assert mock_func.call_args[0][0] == uuid1 @patch("btrdb.conn.BTrDB.stream_from_uuid") def test_streams_recognizes_uuid_string(self, mock_func): """ Assert streams recognizes uuid strings """ - db = BTrDB(None) - uuid1 = "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a" - mock_func.return_value = Stream(db, uuid1) - db.streams(uuid1) + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + uuid1 = "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a" + mock_func.return_value = Stream(db, uuid1) + db.streams(uuid1) - mock_func.assert_called_once() - assert mock_func.call_args[0][0] == uuid1 + mock_func.assert_called_once() + assert mock_func.call_args[0][0] == uuid1 @patch("btrdb.conn.BTrDB.streams_in_collection") def test_streams_handles_path(self, mock_func): """ Assert streams calls streams_in_collection for collection/name paths """ - db = BTrDB(None) - ident = "zoo/animal/dog" - mock_func.return_value = [ - Stream(db, "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a"), - ] 
- db.streams(ident, "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") - - mock_func.assert_called_once() - assert mock_func.call_args[0][0] == "zoo/animal" - assert mock_func.call_args[1] == { - "is_collection_prefix": False, - "tags": {"name": "dog"}, - } + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + ident = "zoo/animal/dog" + mock_func.return_value = [ + Stream(db, "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a"), + ] + db.streams(ident, "0d22a53b-e2ef-4e0a-ab89-b2d48fb2592a") + + mock_func.assert_called_once() + assert mock_func.call_args[0][0] == "zoo/animal" + assert mock_func.call_args[1] == { + "is_collection_prefix": False, + "tags": {"name": "dog"}, + } @patch("btrdb.conn.BTrDB.streams_in_collection") def test_streams_raises_err(self, mock_func): """ Assert streams raises StreamNotFoundError """ - db = BTrDB(None) - ident = "zoo/animal/dog" - - mock_func.return_value = [] - with pytest.raises(StreamNotFoundError) as exc: + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + ident = "zoo/animal/dog" + + mock_func.return_value = [] + with pytest.raises(StreamNotFoundError) as exc: + db.streams(ident) + + # check that does not raise if one returned + mock_func.return_value = [ + Stream(db, ident), + ] db.streams(ident) - # check that does not raise if one returned - mock_func.return_value = [ - Stream(db, ident), - ] - db.streams(ident) - def test_streams_raises_valueerror(self): """ Assert streams raises ValueError if not uuid, uuid str, or path """ - db = BTrDB(None) - with pytest.raises(ValueError) as exc: - db.streams(11) + with patch("btrdb.endpoint.Endpoint", return_value=MagicMock()) as ep: + db = BTrDB(ep) + with pytest.raises(ValueError) as exc: + db.streams(11) ########################################################################## ## other tests diff --git a/tests/btrdb/test_transformers.py b/tests/btrdb/test_transformers.py index 833de69..c7ce719 100644 --- 
a/tests/btrdb/test_transformers.py +++ b/tests/btrdb/test_transformers.py @@ -601,7 +601,7 @@ def test_to_series(self, streamset): def test_to_series_name_lambda(self, streamset): """ - assert to_dateframe uses name lambda + assert to_series uses name lambda """ result = streamset.to_series(name_callable=lambda s: s.name) assert [s.name for s in result] == ["stream0", "stream1", "stream2", "stream3"] @@ -691,23 +691,16 @@ def test_to_dataframe(self, streamset): df.set_index("time", inplace=True) assert to_dataframe(streamset).equals(df) - def test_to_dataframe_column_issues_warning(self, statpoint_streamset): + def test_to_dataframe_column_issues_error(self, statpoint_streamset): """ - assert to_dateframe with column argument issues warning + assert to_dateframe with column argument issues error """ columns = ["test/cats", "test/dogs", "test/horses", "test/fishes"] - with pytest.deprecated_call(): + with pytest.raises(TypeError) as unexpected_key_err: statpoint_streamset.to_dataframe(columns=columns) - - def test_to_dataframe_column(self, statpoint_streamset): - """ - assert to_dateframe with column argument actually renames columns - """ - columns = ["test/cats", "test/dogs", "test/horses", "test/fishes"] - with pytest.deprecated_call(): - df = statpoint_streamset.to_dataframe(columns=columns) - - assert df.columns.tolist() == columns + assert "got an unexpected keyword argument 'columns'" in str( + unexpected_key_err.value + ) def test_to_dataframe_multindex(self, statpoint_streamset): """ diff --git a/tests/btrdb/utils/test_general.py b/tests/btrdb/utils/test_general.py index a13cf3c..0edb86e 100644 --- a/tests/btrdb/utils/test_general.py +++ b/tests/btrdb/utils/test_general.py @@ -56,6 +56,36 @@ def test_from_nanoseconds(self, nsec, expected): """ assert pointwidth.from_nanoseconds(nsec) == expected + @pytest.mark.parametrize( + "pw_time_range, expected", + [ + ( + (52, 1560127027000000000, 1702702800000000000), + (1558245471070191616, 1697857059518676992, 32), 
+ ), + ( + (48, 1560127027000000000, 1702702800000000000), + (1559934320930455552, 1702360659146047488, 507), + ), + ( + (44, 1456876800000000000, 1464652800000000000), + (1456861702896222208, 1464619856941809664, 442), + ), + ( + (38, 1456876800000000000, 1464652800000000000), + (1456876546303197184, 1464652292534829056, 28289), + ), + ( + (32, 1456876800000000000, 1464652800000000000), + (1456876799706267648, 1464652795046002688, 1810491), + ), + ], + ) + def test_for_aligned_windows(self, pw_time_range, expected): + pw, start, end = pw_time_range + result = pointwidth(pw).for_aligned_windows(start, end) + assert result == expected + def test_time_conversions(self): """ Test standard pointwidth time conversions @@ -114,3 +144,19 @@ def test_incr(self): Test incrementing a pointwidth """ assert pointwidth(23).incr() == 24 + + @pytest.mark.parametrize( + "pw, expected", + [ + (54, timedelta(days=208, seconds=43198, microseconds=509482)), + (51, timedelta(days=26, seconds=5399, microseconds=813685)), + (49, timedelta(days=6, seconds=44549, microseconds=953421)), + (46, timedelta(seconds=70368, microseconds=744178)), + (43, timedelta(seconds=8796, microseconds=93022)), + (39, timedelta(seconds=549, microseconds=755814)), + (34, timedelta(seconds=17, microseconds=179869)), + (30, timedelta(seconds=1, microseconds=73742)), + ], + ) + def test_to_timedelta(self, pw, expected): + assert pointwidth(pw).to_timedelta() == expected diff --git a/tests/btrdb_integration/test_conn.py b/tests/btrdb_integration/test_conn.py index 5d30c4f..bc6c9ac 100644 --- a/tests/btrdb_integration/test_conn.py +++ b/tests/btrdb_integration/test_conn.py @@ -1,36 +1,57 @@ import logging +import os from uuid import uuid4 as new_uuid import pytest import btrdb - - -def test_connection_info(conn): - info = conn.info() - logging.info(f"connection info: {info}") - - -def test_create_stream(conn, tmp_collection): - uuid = new_uuid() - stream = conn.create(uuid, tmp_collection, tags={"name": "s"}) - 
assert stream.uuid == uuid - assert stream.name == "s" - - -def test_query(conn, tmp_collection): - conn.create(new_uuid(), tmp_collection, tags={"name": "s1"}) - conn.create(new_uuid(), tmp_collection, tags={"name": "s2"}) - uuids = conn.query( - "select name from streams where collection = $1 order by name;", - [tmp_collection], +from btrdb.exceptions import BTrDBError + + +@pytest.mark.skipif( + os.getenv("BTRDB_INTEGRATION_TEST_PROFILE") is None, + reason="Need an integration test server to run integration tests.", +) +class TestConnection: + def test_connection_info(self, conn): + info = conn.info() + logging.info(f"connection info: {info}") + + def test_incorrect_connect( + self, + ): + err_msg = r"""Could not connect to the database, error message: <_InactiveRpcError of RPC that terminated with:\n\tstatus = StatusCode.UNAUTHENTICATED\n\tdetails = "invalid api key"\n""" + with pytest.raises(BTrDBError, match=err_msg): + conn = btrdb.connect(conn_str="127.0.0.1:4410", apikey="BOGUS_KEY") + + @pytest.mark.xfail( + reason="Should return BTrDBError, but returns GRPCError instead, FIXME" ) - assert len(uuids) == 2 - assert uuids[0]["name"] == "s1" - assert uuids[1]["name"] == "s2" - - -def test_list_collections(conn, tmp_collection): - assert tmp_collection not in conn.list_collections() - stream = conn.create(new_uuid(), tmp_collection, tags={"name": "s"}) - assert tmp_collection in conn.list_collections() + def test_from_uuid(self, conn): + # todo, investigate this, the stat code returned is 0, which is why its not being returned as a btrdb error + uu = new_uuid() + stream = conn.stream_from_uuid(uu) + with pytest.raises(btrdb.exceptions.BTrDBError): + print(stream) + + def test_create_stream(self, conn, tmp_collection): + uuid = new_uuid() + stream = conn.create(uuid, tmp_collection, tags={"name": "s"}) + assert stream.uuid == uuid + assert stream.name == "s" + + def test_query(self, conn, tmp_collection): + conn.create(new_uuid(), tmp_collection, tags={"name": 
"s1"}) + conn.create(new_uuid(), tmp_collection, tags={"name": "s2"}) + uuids = conn.query( + "select name from streams where collection = $1 order by name;", + [tmp_collection], + ) + assert len(uuids) == 2 + assert uuids[0]["name"] == "s1" + assert uuids[1]["name"] == "s2" + + def test_list_collections(self, conn, tmp_collection): + assert tmp_collection not in conn.list_collections() + stream = conn.create(new_uuid(), tmp_collection, tags={"name": "s"}) + assert tmp_collection in conn.list_collections() diff --git a/tests/btrdb_integration/test_stream.py b/tests/btrdb_integration/test_stream.py index e65c71d..5fba2a1 100644 --- a/tests/btrdb_integration/test_stream.py +++ b/tests/btrdb_integration/test_stream.py @@ -61,6 +61,24 @@ def test_arrow_insert_and_values( assert data == fetched_data +def test_arrow_values_template_schema(conn, tmp_collection): + s = conn.create(new_uuid(), tmp_collection, tags={"name": "s"}) + t = currently_as_ns() + times = [100, 200, 300, 400] + values = [1.0, 2.0, 3.0, 4.0] + s.insert(list(zip(times, values))) + schema = pa.schema( + [ + pa.field("t", pa.int64(), nullable=False), + pa.field("v", pa.float32(), nullable=False), + ] + ) + expected = pa.Table.from_arrays([pa.array(times), pa.array(values)], schema=schema) + fetched_data = s.arrow_values(start=times[0], end=times[-1] + 1, schema=schema) + assert expected == fetched_data + assert expected.schema.equals(fetched_data.schema) + + @pytest.mark.parametrize( "merge_policy,duplicates_expected", [("never", True), ("equal", True), ("retain", False), ("replace", False)], @@ -103,7 +121,7 @@ def test_arrow_values_table_schema( assert single_stream_values_arrow_schema.equals(fetched_data.schema) -def test_arrow_values_table_schema( +def test_arrow_values_table_schema_2( conn, tmp_collection, single_stream_values_arrow_schema ): s = conn.create(new_uuid(), tmp_collection, tags={"name": "s"}) @@ -330,19 +348,21 @@ def test_arrow_empty_values_schema(conn, tmp_collection): assert 
schema.equals(data.schema) -@pytest.mark.xfail def test_stream_annotation_update(conn, tmp_collection): - # XXX marked as expected failure until someone has time to investigate. s = conn.create( - new_uuid(), tmp_collection, tags={"name": "s"}, annotations={"foo": "bar"} + new_uuid(), + tmp_collection + "foo/", + tags={"name": "s"}, + annotations={"foo": "bar"}, ) annotations1, version1 = s.annotations() - assert version1 == 0 + assert version1 == 1 assert annotations1["foo"] == "bar" s.update(annotations={"foo": "baz"}) annotations2, version2 = s.annotations() assert version2 > version1 assert annotations2["foo"] == "baz" s.update(annotations={}, replace=True) - annotations3, _ = s.annotations() + annotations3, version3 = s.annotations() assert len(annotations3) == 0 + assert version3 > version2 diff --git a/tests/btrdb_integration/test_streamset.py b/tests/btrdb_integration/test_streamset.py index d173818..5a0fbec 100644 --- a/tests/btrdb_integration/test_streamset.py +++ b/tests/btrdb_integration/test_streamset.py @@ -73,6 +73,36 @@ def test_streamset_arrow_values(conn, tmp_collection): assert expected_schema.equals(values.schema) +def test_streamset_template_schema(conn, tmp_collection): + s1 = conn.create(new_uuid(), tmp_collection, tags={"name": "s1"}) + s2 = conn.create(new_uuid(), tmp_collection, tags={"name": "s2"}) + t1 = [100, 105, 110, 115, 120] + t2 = [101, 106, 110, 114, 119] + d1 = [0.0, 1.0, 2.0, 3.0, 4.0] + d2 = [5.0, 6.0, 7.0, 8.0, 9.0] + s1.insert(list(zip(t1, d1))) + s2.insert(list(zip(t2, d2))) + schema = pa.schema( + [ + pa.field("t", pa.timestamp("ns", tz="UTC"), nullable=False), + pa.field("a", pa.float64(), nullable=False), + pa.field("b", pa.float32(), nullable=False), + ] + ) + ss = btrdb.stream.StreamSet([s1, s2]).filter(start=100, end=121, schema=schema) + expected_times = [100, 101, 105, 106, 110, 114, 115, 119, 120] + expected_col1 = [0.0, np.NaN, 1.0, np.NaN, 2.0, np.NaN, 3.0, np.NaN, 4.0] + expected_col2 = [np.NaN, 5.0, np.NaN, 6.0, 
7.0, 8.0, np.NaN, 9.0, np.NaN] + values = ss.arrow_values() + times = [t.value for t in values["t"]] + col1 = [np.NaN if isnan(v.as_py()) else v.as_py() for v in values["a"]] + col2 = [np.NaN if isnan(v.as_py()) else v.as_py() for v in values["b"]] + assert times == expected_times + assert col1 == expected_col1 + assert col2 == expected_col2 + assert schema.equals(values.schema) + + @pytest.mark.parametrize( "name_callable", [(None), (lambda s: str(s.uuid)), (lambda s: s.name + "/" + s.collection)], @@ -96,7 +126,6 @@ def test_streamset_arrow_windows_vs_windows(conn, tmp_collection, name_callable) .windows(width=btrdb.utils.timez.ns_delta(nanoseconds=10)) ) values_arrow = ss.arrow_to_dataframe(name_callable=name_callable) - values_arrow.set_index("time", inplace=True) values_arrow.index = pd.DatetimeIndex(values_arrow.index) values_prev = ss.to_dataframe(name_callable=name_callable).convert_dtypes( dtype_backend="pyarrow" @@ -128,7 +157,6 @@ def test_streamset_arrow_windows_vs_windows_agg_all(conn, tmp_collection): .windows(width=btrdb.utils.timez.ns_delta(nanoseconds=10)) ) values_arrow = ss.arrow_to_dataframe(name_callable=None, agg=["all"]) - values_arrow.set_index("time", inplace=True) values_arrow.index = pd.DatetimeIndex(values_arrow.index) values_prev = ss.to_dataframe(name_callable=None, agg="all") values_prev = values_prev.apply(lambda x: x.astype(str(x.dtype) + "[pyarrow]")) @@ -181,10 +209,48 @@ def test_streamset_arrow_aligned_windows_vs_aligned_windows( ss = ( btrdb.stream.StreamSet([s1, s2, s3]) .filter(start=100, end=121) - .windows(width=btrdb.utils.general.pointwidth.from_nanoseconds(10)) + .aligned_windows(pointwidth=btrdb.utils.general.pointwidth.from_nanoseconds(10)) + ) + values_arrow = ss.arrow_to_dataframe(name_callable=name_callable) + values_arrow.index = pd.DatetimeIndex(values_arrow.index) + values_prev = ss.to_dataframe( + name_callable=name_callable + ) # .convert_dtypes(dtype_backend='pyarrow') + values_prev = values_prev.apply(lambda 
x: x.astype(str(x.dtype) + "[pyarrow]")) + values_prev = values_prev.apply( + lambda x: x.astype("uint64[pyarrow]") if "count" in x.name else x + ) + values_prev.index = pd.DatetimeIndex(values_prev.index, tz="UTC") + col_map = {old_col: old_col + "/mean" for old_col in values_prev.columns} + values_prev = values_prev.rename(columns=col_map) + assert values_arrow.equals(values_prev) + + +@pytest.mark.parametrize( + "name_callable", + [(None), (lambda s: str(s.uuid)), (lambda s: s.name + "/" + s.collection)], + ids=["empty", "uu_as_str", "name_collection"], +) +def test_streamset_arrow_aligned_windows_join_logic( + conn, tmp_collection, name_callable +): + s1 = conn.create(new_uuid(), tmp_collection, tags={"name": "s1"}) + s2 = conn.create(new_uuid(), tmp_collection, tags={"name": "s2"}) + s3 = conn.create(new_uuid(), tmp_collection, tags={"name": "s3"}) + t1 = [100, 105, 110, 115, 120] + t2 = [101, 106, 110, 132, 140] + d1 = [0.0, 1.0, 2.0, 3.0, 4.0] + d2 = [5.0, 6.0, 7.0, 8.0, 9.0] + d3 = [1.0, 9.0, 44.0, 8.0, 9.0] + s1.insert(list(zip(t1, d1))) + s2.insert(list(zip(t2, d2))) + s3.insert(list(zip(t2, d3))) + ss = ( + btrdb.stream.StreamSet([s1, s2, s3]) + .filter(start=100, end=141) + .aligned_windows(pointwidth=btrdb.utils.general.pointwidth.from_nanoseconds(8)) ) values_arrow = ss.arrow_to_dataframe(name_callable=name_callable) - values_arrow.set_index("time", inplace=True) values_arrow.index = pd.DatetimeIndex(values_arrow.index) values_prev = ss.to_dataframe( name_callable=name_callable @@ -247,7 +313,6 @@ def test_arrow_streamset_to_dataframe(conn, tmp_collection): s2.insert(list(zip(t2, d2))) ss = btrdb.stream.StreamSet([s1, s2]).filter(start=100, end=121) values = ss.arrow_to_dataframe() - values.set_index("time", inplace=True) expected_times = [100, 101, 105, 106, 110, 114, 115, 119, 120] expected_times = [ pa.scalar(v, type=pa.timestamp("ns", tz="UTC")).as_py() for v in expected_times @@ -270,11 +335,11 @@ def test_arrow_streamset_to_dataframe(conn, 
tmp_collection): pa.field(tmp_collection + "/s2", type=pa.float64(), nullable=False), ] ) - expected_table = pa.Table.from_pydict(expected_dat, schema=schema) + expected_table = pa.Table.from_pydict(mapping=expected_dat, schema=schema) expected_df = expected_table.to_pandas( - timestamp_as_object=False, types_mapper=pd.ArrowDtype - ) - expected_df.set_index("time", inplace=True) + timestamp_as_object=False, + types_mapper=pd.ArrowDtype, + ).set_index("time") expected_df.index = pd.DatetimeIndex(expected_df.index, tz="UTC") np_test.assert_array_equal( values.values.astype(float), expected_df.values.astype(float) @@ -303,9 +368,9 @@ def test_arrow_streamset_to_polars(conn, tmp_collection): tmp_collection + "/s2": expected_col2, } expected_df = pd.DataFrame( - expected_dat, index=pd.DatetimeIndex(expected_times) - ).reset_index(names="time") - expected_df_pl = pl.from_pandas(expected_df, nan_to_null=False) + expected_dat, index=pd.DatetimeIndex(expected_times, name="time") + ) + expected_df_pl = pl.from_pandas(expected_df, nan_to_null=False, include_index=True) pl_test.assert_frame_equal(values, expected_df_pl) @@ -337,9 +402,9 @@ def test_streamset_arrow_polars_vs_old_to_polars(conn, tmp_collection, name_call tmp_collection + "/s2": expected_col2, } expected_df = pd.DataFrame( - expected_dat, index=pd.DatetimeIndex(expected_times, tz="UTC") - ).reset_index(names="time") - expected_df_pl = pl.from_pandas(expected_df, nan_to_null=False) + expected_dat, index=pd.DatetimeIndex(expected_times, tz="UTC", name="time") + ) + expected_df_pl = pl.from_pandas(expected_df, nan_to_null=False, include_index=True) pl_test.assert_frame_equal(values_arrow, expected_df_pl) pl_test.assert_frame_equal(values_non_arrow, expected_df_pl) pl_test.assert_frame_equal(values_non_arrow, values_arrow) @@ -377,7 +442,7 @@ def test_streamset_windows_arrow_polars_vs_old_to_polars( } new_names["time"] = "time" values_non_arrow_pl = values_non_arrow_pl.rename(mapping=new_names) - assert 
values_arrow_pl.frame_equal(values_non_arrow_pl) + assert values_arrow_pl.equals(values_non_arrow_pl) def test_streamset_windows_aggregates_filter(conn, tmp_collection): @@ -398,7 +463,6 @@ def test_streamset_windows_aggregates_filter(conn, tmp_collection): .windows(width=btrdb.utils.timez.ns_delta(nanoseconds=10)) ) values_arrow_df = ss.arrow_to_dataframe(agg=["mean", "stddev"]) - values_arrow_df.set_index("time", inplace=True) values_arrow_df.index = pd.DatetimeIndex(values_arrow_df.index) values_non_arrow_df = ss.to_dataframe(agg="all") values_non_arrow_df.index = pd.DatetimeIndex(values_non_arrow_df.index, tz="UTC") @@ -487,7 +551,6 @@ def test_timesnap_with_different_sampling_frequencies(freq, conn, tmp_collection df = stset.filter( start=start, end=stop, sampling_frequency=freq ).arrow_to_dataframe() - df.set_index("time", inplace=True) total_points = df.shape[0] * df.shape[1] total_raw_pts = len(v1) * len(stset) expected_frac_of_pts = 1 if freq is None else freq / data_insert_freq