- As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
Library versions released prior to that date will continue to be available. For more information please
visit
Python 2 support on Google Cloud.
diff --git a/docs/api-reference.rst b/docs/api-reference.rst
deleted file mode 100644
index 41046f78bf..0000000000
--- a/docs/api-reference.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-API Reference
-=============
-
-The following classes and methods constitute the Spanner client.
-Most likely, you will be interacting almost exclusively with these:
-
-.. toctree::
- :maxdepth: 1
-
- client-api
- instance-api
- database-api
- table-api
- session-api
- keyset-api
- snapshot-api
- batch-api
- transaction-api
- streamed-api
-
-
-The classes and methods above depend on the following, lower-level
-classes and methods. Documentation for these is provided for completion,
-and some advanced use cases may wish to interact with these directly:
-
-.. toctree::
- :maxdepth: 1
-
- spanner_v1/services
- spanner_v1/types
- spanner_admin_database_v1/services
- spanner_admin_database_v1/types
- spanner_admin_instance_v1/services
- spanner_admin_instance_v1/types
diff --git a/docs/client-usage.rst b/docs/client-usage.rst
index ce13bf4aa0..7ba3390e59 100644
--- a/docs/client-usage.rst
+++ b/docs/client-usage.rst
@@ -1,5 +1,5 @@
-Spanner Client
-==============
+Spanner Client Usage
+====================
.. _spanner-client:
diff --git a/docs/conf.py b/docs/conf.py
index 96337defe2..010a6b6cda 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
-# Copyright 2021 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+#
# google-cloud-spanner documentation build configuration file
#
# This file is execfile()d with the current directory set to its
@@ -42,7 +43,7 @@
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.5.5"
+needs_sphinx = "4.5.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -80,9 +81,9 @@
root_doc = "index"
# General information about the project.
-project = "google-cloud-spanner"
-copyright = "2019, Google"
-author = "Google APIs"
+project = u"google-cloud-spanner"
+copyright = u"2025, Google, LLC"
+author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -156,7 +157,7 @@
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-spanner",
"github_user": "googleapis",
- "github_repo": "python-spanner",
+ "github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
@@ -266,13 +267,13 @@
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
- #'papersize': 'letterpaper',
+ # 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
- #'pointsize': '10pt',
+ # 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
- #'preamble': '',
+ # 'preamble': '',
# Latex figure (float) alignment
- #'figure_align': 'htbp',
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
@@ -282,7 +283,7 @@
(
root_doc,
"google-cloud-spanner.tex",
- "google-cloud-spanner Documentation",
+ u"google-cloud-spanner Documentation",
author,
"manual",
)
diff --git a/docs/database-usage.rst b/docs/database-usage.rst
index 629f1ab28a..afcfa06cb2 100644
--- a/docs/database-usage.rst
+++ b/docs/database-usage.rst
@@ -1,5 +1,5 @@
-Database Admin
-==============
+Database Admin Usage
+====================
After creating an :class:`~google.cloud.spanner_v1.instance.Instance`, you can
interact with individual databases for that instance.
diff --git a/docs/index.rst b/docs/index.rst
index a4ab1b27d7..0de0483409 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -5,27 +5,48 @@
Usage Documentation
-------------------
.. toctree::
- :maxdepth: 1
- :titlesonly:
+ :maxdepth: 2
client-usage
- instance-usage
- database-usage
table-usage
batch-usage
snapshot-usage
transaction-usage
+ database-usage
+ instance-usage
+
API Documentation
-----------------
.. toctree::
:maxdepth: 1
:titlesonly:
- api-reference
advanced-session-pool-topics
opentelemetry-tracing
+ spanner_v1/client
+ spanner_v1/instance
+ spanner_v1/database
+ spanner_v1/table
+ spanner_v1/session
+ spanner_v1/keyset
+ spanner_v1/snapshot
+ spanner_v1/batch
+ spanner_v1/transaction
+ spanner_v1/streamed
+
+ spanner_v1/services_
+ spanner_v1/types_
+ spanner_admin_database_v1/services_
+ spanner_admin_database_v1/types_
+ spanner_admin_database_v1/database_admin
+ spanner_admin_instance_v1/services_
+ spanner_admin_instance_v1/types_
+ spanner_admin_instance_v1/instance_admin
+
+
+
Changelog
---------
@@ -35,3 +56,8 @@ For a list of all ``google-cloud-spanner`` releases:
:maxdepth: 2
changelog
+
+.. toctree::
+ :hidden:
+
+ summary_overview.md
diff --git a/docs/instance-usage.rst b/docs/instance-usage.rst
index 55042c2df3..b45b69acc6 100644
--- a/docs/instance-usage.rst
+++ b/docs/instance-usage.rst
@@ -1,5 +1,5 @@
-Instance Admin
-==============
+Instance Admin Usage
+====================
After creating a :class:`~google.cloud.spanner_v1.client.Client`, you can
interact with individual instances for a project.
diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index 9b3dea276f..c581d2cb87 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -8,10 +8,8 @@ To take advantage of these traces, we first need to install OpenTelemetry:
.. code-block:: sh
- pip install opentelemetry-api opentelemetry-sdk opentelemetry-instrumentation
-
- # [Optional] Installs the cloud monitoring exporter, however you can use any exporter of your choice
- pip install opentelemetry-exporter-google-cloud
+ pip install opentelemetry-api opentelemetry-sdk
+ pip install opentelemetry-exporter-gcp-trace
We also need to tell OpenTelemetry which exporter to use. To export Spanner traces to `Cloud Tracing
`_, add the following lines to your application:
@@ -19,22 +17,80 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
- from opentelemetry.trace.sampling import ProbabilitySampler
+ from opentelemetry.sdk.trace.sampling import TraceIdRatioBased
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
- # BatchExportSpanProcessor exports spans to Cloud Trace
+ # BatchSpanProcessor exports spans to Cloud Trace
# in a seperate thread to not block on the main thread
- from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Create and export one trace every 1000 requests
- sampler = ProbabilitySampler(1/1000)
- # Use the default tracer provider
- trace.set_tracer_provider(TracerProvider(sampler=sampler))
- trace.get_tracer_provider().add_span_processor(
+ sampler = TraceIdRatioBased(1/1000)
+ tracer_provider = TracerProvider(sampler=sampler)
+ tracer_provider.add_span_processor(
# Initialize the cloud tracing exporter
- BatchExportSpanProcessor(CloudTraceSpanExporter())
+ BatchSpanProcessor(CloudTraceSpanExporter())
+ )
+ observability_options = dict(
+ tracer_provider=tracer_provider,
+
+ # By default extended_tracing is set to True due
+ # to legacy reasons to avoid breaking changes, you
+ # can modify it though using the environment variable
+ # SPANNER_ENABLE_EXTENDED_TRACING=false.
+ enable_extended_tracing=False,
+
+ # By default end to end tracing is set to False. Set to True
+ # for getting spans for Spanner server.
+ enable_end_to_end_tracing=True,
)
+ spanner_client = spanner.Client(project_id, observability_options=observability_options)
+
+
+To get more fine-grained traces from gRPC, you can enable the gRPC instrumentation by the following
+
+.. code-block:: sh
+
+ pip install opentelemetry-instrumentation opentelemetry-instrumentation-grpc
+
+and then in your Python code, please add the following lines:
+
+.. code:: python
+
+ from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+ grpc_client_instrumentor = GrpcInstrumentorClient()
+ grpc_client_instrumentor.instrument()
+
Generated spanner traces should now be available on `Cloud Trace `_.
Tracing is most effective when many libraries are instrumented to provide insight over the entire lifespan of a request.
For a list of libraries that can be instrumented, see the `OpenTelemetry Integrations` section of the `OpenTelemetry Python docs `_
+
+Annotating spans with SQL
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default your spans will be annotated with SQL statements where appropriate, but that can be a PII (Personally Identifiable Information)
+leak. Sadly due to legacy behavior, we cannot simply turn off this behavior by default. However you can control this behavior by setting
+
+ SPANNER_ENABLE_EXTENDED_TRACING=false
+
+to turn it off globally, or when creating each SpannerClient, please set ``observability_options.enable_extended_tracing=False``
+
+End to end tracing
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to client-side tracing, you can opt in for end-to-end tracing. End-to-end tracing helps you understand and debug latency issues that are specific to Spanner. Refer to the `tracing overview <https://cloud.google.com/spanner/docs/tracing-overview>`_ for more information.
+
+To configure end-to-end tracing:
+
+1. Opt in for end-to-end tracing. You can opt-in by either:
+* Setting the environment variable ``SPANNER_ENABLE_END_TO_END_TRACING=true`` before your application is started
+* In code, by setting ``observability_options.enable_end_to_end_tracing=True`` when creating each SpannerClient.
+
+2. Set the trace context propagation in OpenTelemetry.
+
+.. code:: python
+
+ from opentelemetry.propagate import set_global_textmap
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+ set_global_textmap(TraceContextTextMapPropagator())
\ No newline at end of file
diff --git a/docs/spanner_admin_database_v1/services.rst b/docs/spanner_admin_database_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_database_v1/services.rst
rename to docs/spanner_admin_database_v1/services_.rst
diff --git a/docs/spanner_admin_database_v1/types.rst b/docs/spanner_admin_database_v1/types_.rst
similarity index 91%
rename from docs/spanner_admin_database_v1/types.rst
rename to docs/spanner_admin_database_v1/types_.rst
index 95e1d7f88b..fe6c27778b 100644
--- a/docs/spanner_admin_database_v1/types.rst
+++ b/docs/spanner_admin_database_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Database v1 API
.. automodule:: google.cloud.spanner_admin_database_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/spanner_admin_instance_v1/services.rst b/docs/spanner_admin_instance_v1/services_.rst
similarity index 100%
rename from docs/spanner_admin_instance_v1/services.rst
rename to docs/spanner_admin_instance_v1/services_.rst
diff --git a/docs/spanner_admin_instance_v1/types.rst b/docs/spanner_admin_instance_v1/types_.rst
similarity index 91%
rename from docs/spanner_admin_instance_v1/types.rst
rename to docs/spanner_admin_instance_v1/types_.rst
index 8f7204ebce..250cf6bf9b 100644
--- a/docs/spanner_admin_instance_v1/types.rst
+++ b/docs/spanner_admin_instance_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner Admin Instance v1 API
.. automodule:: google.cloud.spanner_admin_instance_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/batch-api.rst b/docs/spanner_v1/batch.rst
similarity index 100%
rename from docs/batch-api.rst
rename to docs/spanner_v1/batch.rst
diff --git a/docs/client-api.rst b/docs/spanner_v1/client.rst
similarity index 100%
rename from docs/client-api.rst
rename to docs/spanner_v1/client.rst
diff --git a/docs/database-api.rst b/docs/spanner_v1/database.rst
similarity index 100%
rename from docs/database-api.rst
rename to docs/spanner_v1/database.rst
diff --git a/docs/instance-api.rst b/docs/spanner_v1/instance.rst
similarity index 100%
rename from docs/instance-api.rst
rename to docs/spanner_v1/instance.rst
diff --git a/docs/keyset-api.rst b/docs/spanner_v1/keyset.rst
similarity index 100%
rename from docs/keyset-api.rst
rename to docs/spanner_v1/keyset.rst
diff --git a/docs/spanner_v1/services.rst b/docs/spanner_v1/services_.rst
similarity index 100%
rename from docs/spanner_v1/services.rst
rename to docs/spanner_v1/services_.rst
diff --git a/docs/session-api.rst b/docs/spanner_v1/session.rst
similarity index 100%
rename from docs/session-api.rst
rename to docs/spanner_v1/session.rst
diff --git a/docs/snapshot-api.rst b/docs/spanner_v1/snapshot.rst
similarity index 100%
rename from docs/snapshot-api.rst
rename to docs/spanner_v1/snapshot.rst
diff --git a/docs/streamed-api.rst b/docs/spanner_v1/streamed.rst
similarity index 100%
rename from docs/streamed-api.rst
rename to docs/spanner_v1/streamed.rst
diff --git a/docs/table-api.rst b/docs/spanner_v1/table.rst
similarity index 100%
rename from docs/table-api.rst
rename to docs/spanner_v1/table.rst
diff --git a/docs/transaction-api.rst b/docs/spanner_v1/transaction.rst
similarity index 100%
rename from docs/transaction-api.rst
rename to docs/spanner_v1/transaction.rst
diff --git a/docs/spanner_v1/types.rst b/docs/spanner_v1/types_.rst
similarity index 88%
rename from docs/spanner_v1/types.rst
rename to docs/spanner_v1/types_.rst
index 8678aba188..c7ff7e6c71 100644
--- a/docs/spanner_v1/types.rst
+++ b/docs/spanner_v1/types_.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Spanner v1 API
.. automodule:: google.cloud.spanner_v1.types
:members:
- :undoc-members:
:show-inheritance:
diff --git a/docs/summary_overview.md b/docs/summary_overview.md
new file mode 100644
index 0000000000..ffaf71df07
--- /dev/null
+++ b/docs/summary_overview.md
@@ -0,0 +1,22 @@
+[
+This is a templated file. Adding content to this file may result in it being
+reverted. Instead, if you want to place additional content, create an
+"overview_content.md" file in `docs/` directory. The Sphinx tool will
+pick up on the content and merge the content.
+]: #
+
+# Cloud Spanner API
+
+Overview of the APIs available for Cloud Spanner API.
+
+## All entries
+
+Classes, methods and properties & attributes for
+Cloud Spanner API.
+
+[classes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_class.html)
+
+[methods](https://cloud.google.com/python/docs/reference/spanner/latest/summary_method.html)
+
+[properties and
+attributes](https://cloud.google.com/python/docs/reference/spanner/latest/summary_property.html)
diff --git a/docs/table-usage.rst b/docs/table-usage.rst
index 9d28da1ebb..01459b5f8e 100644
--- a/docs/table-usage.rst
+++ b/docs/table-usage.rst
@@ -1,5 +1,5 @@
-Table Admin
-===========
+Table Admin Usage
+=================
After creating an :class:`~google.cloud.spanner_v1.database.Database`, you can
interact with individual tables for that instance.
diff --git a/docs/transaction-usage.rst b/docs/transaction-usage.rst
index 4781cfa148..78026bf5a4 100644
--- a/docs/transaction-usage.rst
+++ b/docs/transaction-usage.rst
@@ -5,7 +5,8 @@ A :class:`~google.cloud.spanner_v1.transaction.Transaction` represents a
transaction: when the transaction commits, it will send any accumulated
mutations to the server.
-To understand more about how transactions work, visit [Transaction](https://cloud.google.com/spanner/docs/reference/rest/v1/Transaction).
+To understand more about how transactions work, visit
+`Transaction `_.
To learn more about how to use them in the Python client, continue reading.
@@ -90,8 +91,8 @@ any of the records already exists.
Update records using a Transaction
----------------------------------
-:meth:`Transaction.update` updates one or more existing records in a table. Fails
-if any of the records does not already exist.
+:meth:`Transaction.update` updates one or more existing records in a table.
+Fails if any of the records does not already exist.
.. code:: python
@@ -178,9 +179,9 @@ Using :meth:`~Database.run_in_transaction`
Rather than calling :meth:`~Transaction.commit` or :meth:`~Transaction.rollback`
manually, you should use :meth:`~Database.run_in_transaction` to run the
-function that you need. The transaction's :meth:`~Transaction.commit` method
+function that you need. The transaction's :meth:`~Transaction.commit` method
will be called automatically if the ``with`` block exits without raising an
-exception. The function will automatically be retried for
+exception. The function will automatically be retried for
:class:`~google.api_core.exceptions.Aborted` errors, but will raise on
:class:`~google.api_core.exceptions.GoogleAPICallError` and
:meth:`~Transaction.rollback` will be called on all others.
@@ -188,25 +189,30 @@ exception. The function will automatically be retried for
.. code:: python
def _unit_of_work(transaction):
-
transaction.insert(
- 'citizens', columns=['email', 'first_name', 'last_name', 'age'],
+ 'citizens',
+ columns=['email', 'first_name', 'last_name', 'age'],
values=[
['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
['bharney@example.com', 'Bharney', 'Rhubble', 31],
- ])
+ ]
+ )
transaction.update(
- 'citizens', columns=['email', 'age'],
+ 'citizens',
+ columns=['email', 'age'],
values=[
['phred@exammple.com', 33],
['bharney@example.com', 32],
- ])
+ ]
+ )
...
- transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ transaction.delete(
+ 'citizens',
+ keyset=['bharney@example.com', 'nonesuch@example.com']
+ )
db.run_in_transaction(_unit_of_work)
@@ -242,7 +248,7 @@ If an exception is raised inside the ``with`` block, the transaction's
...
transaction.delete('citizens',
- keyset['bharney@example.com', 'nonesuch@example.com'])
+ keyset=['bharney@example.com', 'nonesuch@example.com'])
Begin a Transaction
diff --git a/examples/grpc_instrumentation_enabled.py b/examples/grpc_instrumentation_enabled.py
new file mode 100644
index 0000000000..c8bccd0a9d
--- /dev/null
+++ b/examples/grpc_instrumentation_enabled.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+
+# Enable the gRPC instrumentation if you'd like more introspection.
+from opentelemetry.instrumentation.grpc import GrpcInstrumentorClient
+
+grpc_client_instrumentor = GrpcInstrumentorClient()
+grpc_client_instrumentor.instrument()
+
+
+def main():
+ # Setup common variables that'll be used between Spanner and traces.
+ project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+ trace.set_tracer_provider(tracer_provider)
+ # Retrieve a tracer from the global tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(project_id)
+
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and a ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ pass
+
+
+if __name__ == '__main__':
+ main()
diff --git a/examples/trace.py b/examples/trace.py
new file mode 100644
index 0000000000..5b826ca5ad
--- /dev/null
+++ b/examples/trace.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+import os
+import time
+
+import google.cloud.spanner as spanner
+from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry import trace
+from opentelemetry.propagate import set_global_textmap
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+# Setup common variables that'll be used between Spanner and traces.
+project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
+
+def spanner_with_cloud_trace():
+ # [START spanner_opentelemetry_traces_cloudtrace_usage]
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+
+ # [END spanner_opentelemetry_traces_cloudtrace_usage]
+ return spanner_client
+
+def spanner_with_otlp():
+ # [START spanner_opentelemetry_traces_otlp_usage]
+ # Setup OpenTelemetry, trace and OTLP exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317")
+ tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))
+
+ # Setup the Cloud Spanner Client.
+ spanner_client = spanner.Client(
+ project_id,
+ observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+ )
+ # [END spanner_opentelemetry_traces_otlp_usage]
+ return spanner_client
+
+
+def main():
+ # Setup OpenTelemetry, trace and Cloud Trace exporter.
+ tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+ trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+ tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+ # Setup the Cloud Spanner Client.
+ # Change to "spanner_client = spanner_with_otlp" to use OTLP exporter
+ spanner_client = spanner_with_cloud_trace()
+ instance = spanner_client.instance('test-instance')
+ database = instance.database('test-db')
+
+ # Set W3C Trace Context as the global propagator for end to end tracing.
+ set_global_textmap(TraceContextTextMapPropagator())
+
+ # Retrieve a tracer from our custom tracer provider.
+ tracer = tracer_provider.get_tracer('MyApp')
+
+ # Now run our queries
+ with tracer.start_as_current_span('QueryInformationSchema'):
+ with database.snapshot() as snapshot:
+ with tracer.start_as_current_span('InformationSchema'):
+ info_schema = snapshot.execute_sql(
+ 'SELECT * FROM INFORMATION_SCHEMA.TABLES')
+ for row in info_schema:
+ print(row)
+
+ with tracer.start_as_current_span('ServerTimeQuery'):
+ with database.snapshot() as snapshot:
+ # Purposefully issue a bad SQL statement to examine exceptions
+ # that get recorded and a ERROR span status.
+ try:
+ data = snapshot.execute_sql('SELECT CURRENT_TIMESTAMPx()')
+ for row in data:
+ print(row)
+ except Exception as e:
+ print(e)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/google/__init__.py b/google/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py
deleted file mode 100644
index 2f4b4738ae..0000000000
--- a/google/cloud/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py
index a70cf0acfd..42b15fe254 100644
--- a/google/cloud/spanner_admin_database_v1/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,12 +13,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
+
+import google.api_core as api_core
+import sys
+
+__version__ = package_version.__version__
+
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.database_admin import DatabaseAdminClient
from .services.database_admin import DatabaseAdminAsyncClient
from .types.backup import Backup
from .types.backup import BackupInfo
+from .types.backup import BackupInstancePartition
from .types.backup import CopyBackupEncryptionConfig
from .types.backup import CopyBackupMetadata
from .types.backup import CopyBackupRequest
@@ -26,24 +41,40 @@
from .types.backup import CreateBackupMetadata
from .types.backup import CreateBackupRequest
from .types.backup import DeleteBackupRequest
+from .types.backup import FullBackupSpec
from .types.backup import GetBackupRequest
+from .types.backup import IncrementalBackupSpec
from .types.backup import ListBackupOperationsRequest
from .types.backup import ListBackupOperationsResponse
from .types.backup import ListBackupsRequest
from .types.backup import ListBackupsResponse
from .types.backup import UpdateBackupRequest
+from .types.backup_schedule import BackupSchedule
+from .types.backup_schedule import BackupScheduleSpec
+from .types.backup_schedule import CreateBackupScheduleRequest
+from .types.backup_schedule import CrontabSpec
+from .types.backup_schedule import DeleteBackupScheduleRequest
+from .types.backup_schedule import GetBackupScheduleRequest
+from .types.backup_schedule import ListBackupSchedulesRequest
+from .types.backup_schedule import ListBackupSchedulesResponse
+from .types.backup_schedule import UpdateBackupScheduleRequest
from .types.common import EncryptionConfig
from .types.common import EncryptionInfo
from .types.common import OperationProgress
from .types.common import DatabaseDialect
+from .types.spanner_database_admin import AddSplitPointsRequest
+from .types.spanner_database_admin import AddSplitPointsResponse
from .types.spanner_database_admin import CreateDatabaseMetadata
from .types.spanner_database_admin import CreateDatabaseRequest
from .types.spanner_database_admin import Database
from .types.spanner_database_admin import DatabaseRole
+from .types.spanner_database_admin import DdlStatementActionInfo
from .types.spanner_database_admin import DropDatabaseRequest
from .types.spanner_database_admin import GetDatabaseDdlRequest
from .types.spanner_database_admin import GetDatabaseDdlResponse
from .types.spanner_database_admin import GetDatabaseRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationRequest
+from .types.spanner_database_admin import InternalUpdateGraphOperationResponse
from .types.spanner_database_admin import ListDatabaseOperationsRequest
from .types.spanner_database_admin import ListDatabaseOperationsResponse
from .types.spanner_database_admin import ListDatabaseRolesRequest
@@ -55,36 +86,149 @@
from .types.spanner_database_admin import RestoreDatabaseMetadata
from .types.spanner_database_admin import RestoreDatabaseRequest
from .types.spanner_database_admin import RestoreInfo
+from .types.spanner_database_admin import SplitPoints
from .types.spanner_database_admin import UpdateDatabaseDdlMetadata
from .types.spanner_database_admin import UpdateDatabaseDdlRequest
+from .types.spanner_database_admin import UpdateDatabaseMetadata
+from .types.spanner_database_admin import UpdateDatabaseRequest
from .types.spanner_database_admin import RestoreSourceType
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.spanner_admin_database_v1") # type: ignore
+ api_core.check_dependency_versions("google.cloud.spanner_admin_database_v1") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.spanner_admin_database_v1"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+            version_string: Version string in the format "x.y.z" (any non-numeric suffix, e.g. "1.0.0b1", is ignored)
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+            + f"updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
"DatabaseAdminAsyncClient",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"Backup",
"BackupInfo",
+ "BackupInstancePartition",
+ "BackupSchedule",
+ "BackupScheduleSpec",
"CopyBackupEncryptionConfig",
"CopyBackupMetadata",
"CopyBackupRequest",
"CreateBackupEncryptionConfig",
"CreateBackupMetadata",
"CreateBackupRequest",
+ "CreateBackupScheduleRequest",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
+ "CrontabSpec",
"Database",
"DatabaseAdminClient",
"DatabaseDialect",
"DatabaseRole",
+ "DdlStatementActionInfo",
"DeleteBackupRequest",
+ "DeleteBackupScheduleRequest",
"DropDatabaseRequest",
"EncryptionConfig",
"EncryptionInfo",
+ "FullBackupSpec",
"GetBackupRequest",
+ "GetBackupScheduleRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
+ "IncrementalBackupSpec",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"ListDatabaseOperationsRequest",
@@ -100,7 +244,11 @@
"RestoreDatabaseRequest",
"RestoreInfo",
"RestoreSourceType",
+ "SplitPoints",
"UpdateBackupRequest",
+ "UpdateBackupScheduleRequest",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
)
diff --git a/google/cloud/spanner_admin_database_v1/gapic_metadata.json b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
index 446e3a6d88..027a4f612b 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_metadata.json
+++ b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
@@ -10,6 +10,11 @@
"grpc": {
"libraryClient": "DatabaseAdminClient",
"rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
"CopyBackup": {
"methods": [
"copy_backup"
@@ -20,6 +25,11 @@
"create_backup"
]
},
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
"CreateDatabase": {
"methods": [
"create_database"
@@ -30,6 +40,11 @@
"delete_backup"
]
},
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
"DropDatabase": {
"methods": [
"drop_database"
@@ -40,6 +55,11 @@
"get_backup"
]
},
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
"GetDatabase": {
"methods": [
"get_database"
@@ -55,11 +75,21 @@
"get_iam_policy"
]
},
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
"ListBackupOperations": {
"methods": [
"list_backup_operations"
]
},
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
"ListBackups": {
"methods": [
"list_backups"
@@ -100,6 +130,16 @@
"update_backup"
]
},
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
"UpdateDatabaseDdl": {
"methods": [
"update_database_ddl"
@@ -110,6 +150,11 @@
"grpc-async": {
"libraryClient": "DatabaseAdminAsyncClient",
"rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
"CopyBackup": {
"methods": [
"copy_backup"
@@ -120,6 +165,11 @@
"create_backup"
]
},
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
"CreateDatabase": {
"methods": [
"create_database"
@@ -130,6 +180,11 @@
"delete_backup"
]
},
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
"DropDatabase": {
"methods": [
"drop_database"
@@ -140,6 +195,11 @@
"get_backup"
]
},
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
"GetDatabase": {
"methods": [
"get_database"
@@ -155,11 +215,21 @@
"get_iam_policy"
]
},
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
"ListBackupOperations": {
"methods": [
"list_backup_operations"
]
},
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
"ListBackups": {
"methods": [
"list_backups"
@@ -200,6 +270,156 @@
"update_backup"
]
},
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
+ "UpdateDatabaseDdl": {
+ "methods": [
+ "update_database_ddl"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "DatabaseAdminClient",
+ "rpcs": {
+ "AddSplitPoints": {
+ "methods": [
+ "add_split_points"
+ ]
+ },
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
+ "CreateBackup": {
+ "methods": [
+ "create_backup"
+ ]
+ },
+ "CreateBackupSchedule": {
+ "methods": [
+ "create_backup_schedule"
+ ]
+ },
+ "CreateDatabase": {
+ "methods": [
+ "create_database"
+ ]
+ },
+ "DeleteBackup": {
+ "methods": [
+ "delete_backup"
+ ]
+ },
+ "DeleteBackupSchedule": {
+ "methods": [
+ "delete_backup_schedule"
+ ]
+ },
+ "DropDatabase": {
+ "methods": [
+ "drop_database"
+ ]
+ },
+ "GetBackup": {
+ "methods": [
+ "get_backup"
+ ]
+ },
+ "GetBackupSchedule": {
+ "methods": [
+ "get_backup_schedule"
+ ]
+ },
+ "GetDatabase": {
+ "methods": [
+ "get_database"
+ ]
+ },
+ "GetDatabaseDdl": {
+ "methods": [
+ "get_database_ddl"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "InternalUpdateGraphOperation": {
+ "methods": [
+ "internal_update_graph_operation"
+ ]
+ },
+ "ListBackupOperations": {
+ "methods": [
+ "list_backup_operations"
+ ]
+ },
+ "ListBackupSchedules": {
+ "methods": [
+ "list_backup_schedules"
+ ]
+ },
+ "ListBackups": {
+ "methods": [
+ "list_backups"
+ ]
+ },
+ "ListDatabaseOperations": {
+ "methods": [
+ "list_database_operations"
+ ]
+ },
+ "ListDatabaseRoles": {
+ "methods": [
+ "list_database_roles"
+ ]
+ },
+ "ListDatabases": {
+ "methods": [
+ "list_databases"
+ ]
+ },
+ "RestoreDatabase": {
+ "methods": [
+ "restore_database"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateBackup": {
+ "methods": [
+ "update_backup"
+ ]
+ },
+ "UpdateBackupSchedule": {
+ "methods": [
+ "update_backup_schedule"
+ ]
+ },
+ "UpdateDatabase": {
+ "methods": [
+ "update_database"
+ ]
+ },
"UpdateDatabaseDdl": {
"methods": [
"update_database_ddl"
diff --git a/setup.cfg b/google/cloud/spanner_admin_database_v1/gapic_version.py
similarity index 78%
rename from setup.cfg
rename to google/cloud/spanner_admin_database_v1/gapic_version.py
index c3a2b39f65..bf54fc40ae 100644
--- a/setup.cfg
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
-#
-# Copyright 2020 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# https://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-# Generated by synthtool. DO NOT EDIT!
-[bdist_wheel]
-universal = 1
+#
+__version__ = "3.63.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_database_v1/services/__init__.py b/google/cloud/spanner_admin_database_v1/services/__init__.py
index e8e1c3845d..cbf94b283c 100644
--- a/google/cloud/spanner_admin_database_v1/services/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
index 6fcf1b82e7..580a7ed2a2 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
index 7aa227856f..0e08065a7d 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,35 +13,55 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import logging as std_logging
from collections import OrderedDict
-import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+import uuid
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
-from google.api_core import retry as retries
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object] # type: ignore
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
@@ -49,25 +69,42 @@
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
from .client import DatabaseAdminClient
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
class DatabaseAdminAsyncClient:
"""Cloud Spanner Database Admin API
The Cloud Spanner Database Admin API can be used to:
- - create, drop, and list databases
- - update the schema of pre-existing databases
- - create, delete and list backups for a database
- - restore a database from an existing backup
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
_client: DatabaseAdminClient
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = DatabaseAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = DatabaseAdminClient._DEFAULT_UNIVERSE
backup_path = staticmethod(DatabaseAdminClient.backup_path)
parse_backup_path = staticmethod(DatabaseAdminClient.parse_backup_path)
+ backup_schedule_path = staticmethod(DatabaseAdminClient.backup_schedule_path)
+ parse_backup_schedule_path = staticmethod(
+ DatabaseAdminClient.parse_backup_schedule_path
+ )
crypto_key_path = staticmethod(DatabaseAdminClient.crypto_key_path)
parse_crypto_key_path = staticmethod(DatabaseAdminClient.parse_crypto_key_path)
crypto_key_version_path = staticmethod(DatabaseAdminClient.crypto_key_version_path)
@@ -82,6 +119,10 @@ class DatabaseAdminAsyncClient:
)
instance_path = staticmethod(DatabaseAdminClient.instance_path)
parse_instance_path = staticmethod(DatabaseAdminClient.parse_instance_path)
+ instance_partition_path = staticmethod(DatabaseAdminClient.instance_partition_path)
+ parse_instance_partition_path = staticmethod(
+ DatabaseAdminClient.parse_instance_partition_path
+ )
common_billing_account_path = staticmethod(
DatabaseAdminClient.common_billing_account_path
)
@@ -156,7 +197,7 @@ def get_mtls_endpoint_and_cert_source(
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
- default mTLS endpoint; if the environment variabel is "never", use the default API
+ default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
@@ -185,19 +226,38 @@ def transport(self) -> DatabaseAdminTransport:
"""
return self._client.transport
- get_transport_class = functools.partial(
- type(DatabaseAdminClient).get_transport_class, type(DatabaseAdminClient)
- )
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = DatabaseAdminClient.get_transport_class
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
- transport: Union[str, DatabaseAdminTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the database admin client.
+ """Instantiates the database admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -205,26 +265,43 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
@@ -236,14 +313,38 @@ def __init__(
client_info=client_info,
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
async def list_databases(
self,
- request: Union[spanner_database_admin.ListDatabasesRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesAsyncPager:
r"""Lists Cloud Spanner databases.
@@ -275,7 +376,7 @@ async def sample_list_databases():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest, dict]]):
The request object. The request for
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
parent (:class:`str`):
@@ -286,32 +387,40 @@ async def sample_list_databases():
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabasesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
+ request = spanner_database_admin.ListDatabasesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -320,21 +429,9 @@ async def sample_list_databases():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_databases,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_databases
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -342,6 +439,9 @@ async def sample_list_databases():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -356,6 +456,8 @@ async def sample_list_databases():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -364,13 +466,15 @@ async def sample_list_databases():
async def create_database(
self,
- request: Union[spanner_database_admin.CreateDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -409,13 +513,13 @@ async def sample_create_database():
print("Waiting for operation to complete...")
- response = await operation.result()
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateDatabaseRequest, dict]]):
The request object. The request for
[CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
parent (:class:`str`):
@@ -438,11 +542,13 @@ async def sample_create_database():
This corresponds to the ``create_statement`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -454,16 +560,22 @@ async def sample_create_database():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.CreateDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
+ request = spanner_database_admin.CreateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -474,11 +586,9 @@ async def sample_create_database():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -486,6 +596,9 @@ async def sample_create_database():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -507,12 +620,14 @@ async def sample_create_database():
async def get_database(
self,
- request: Union[spanner_database_admin.GetDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
@@ -543,7 +658,7 @@ async def sample_get_database():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseRequest, dict]]):
The request object. The request for
[GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
name (:class:`str`):
@@ -554,27 +669,35 @@ async def sample_get_database():
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
+ request = spanner_database_admin.GetDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -583,21 +706,9 @@ async def sample_get_database():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -605,6 +716,182 @@ async def sample_get_database():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_database(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
+ *,
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects//instances//databases//operations/``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
+ )
+
+ # Make the request
+ operation = client.update_database(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (:class:`google.cloud.spanner_admin_database_v1.types.Database`):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+ ``projects//instances//databases/``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -613,18 +900,28 @@ async def sample_get_database():
metadata=metadata,
)
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
+ )
+
# Done; return the response.
return response
async def update_database_ddl(
self,
- request: Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the schema of a Cloud Spanner database by
creating/altering/dropping tables, columns, indexes, etc. The
@@ -662,26 +959,26 @@ async def sample_update_database_ddl():
print("Waiting for operation to complete...")
- response = await operation.result()
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
later execution (e.g., if a statement from another batch
of statements is applied first and it conflicts in some
way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
Each batch of statements is assigned a name which can be
used with the
[Operations][google.longrunning.Operations] API to
@@ -693,18 +990,20 @@ async def sample_update_database_ddl():
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- statements (:class:`Sequence[str]`):
+ statements (:class:`MutableSequence[str]`):
Required. DDL statements to be
applied to the database.
This corresponds to the ``statements`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -723,16 +1022,22 @@ async def sample_update_database_ddl():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
+ request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -743,21 +1048,9 @@ async def sample_update_database_ddl():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -765,6 +1058,9 @@ async def sample_update_database_ddl():
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -786,12 +1082,14 @@ async def sample_update_database_ddl():
async def drop_database(
self,
- request: Union[spanner_database_admin.DropDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
+ database: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
@@ -822,7 +1120,7 @@ async def sample_drop_database():
await client.drop_database(request=request)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]]):
The request object. The request for
[DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
database (:class:`str`):
@@ -830,23 +1128,31 @@ async def sample_drop_database():
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.DropDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
+ request = spanner_database_admin.DropDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -855,21 +1161,9 @@ async def sample_drop_database():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.drop_database,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.drop_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -877,6 +1171,9 @@ async def sample_drop_database():
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
request,
@@ -887,12 +1184,14 @@ async def sample_drop_database():
async def get_database_ddl(
self,
- request: Union[spanner_database_admin.GetDatabaseDdlRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
+ database: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
@@ -926,7 +1225,7 @@ async def sample_get_database_ddl():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlRequest, dict]]):
The request object. The request for
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
database (:class:`str`):
@@ -937,29 +1236,37 @@ async def sample_get_database_ddl():
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.GetDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
+ request = spanner_database_admin.GetDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -968,21 +1275,9 @@ async def sample_get_database_ddl():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_database_ddl,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_database_ddl
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -990,6 +1285,9 @@ async def sample_get_database_ddl():
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1003,12 +1301,12 @@ async def sample_get_database_ddl():
async def set_iam_policy(
self,
- request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
+ resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -1048,9 +1346,8 @@ async def sample_set_iam_policy():
print(response)
Args:
- request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
- The request object. Request message for `SetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+ The request object. Request message for ``SetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -1060,11 +1357,13 @@ async def sample_set_iam_policy():
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
@@ -1085,78 +1384,46 @@ async def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+                :literal:`{ "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+                :literal:`bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
- request = iam_policy_pb2.SetIamPolicyRequest(
- resource=resource,
- )
+ request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.set_iam_policy,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.set_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1164,6 +1431,9 @@ async def sample_set_iam_policy():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1177,12 +1447,12 @@ async def sample_set_iam_policy():
async def get_iam_policy(
self,
- request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
+ resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
@@ -1223,9 +1493,8 @@ async def sample_get_iam_policy():
print(response)
Args:
- request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
- The request object. Request message for `GetIamPolicy`
- method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+ The request object. Request message for ``GetIamPolicy`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -1235,11 +1504,13 @@ async def sample_get_iam_policy():
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
@@ -1260,88 +1531,46 @@ async def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
- request = iam_policy_pb2.GetIamPolicyRequest(
- resource=resource,
- )
+ request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_iam_policy,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=30.0,
- ),
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_iam_policy
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1349,6 +1578,9 @@ async def sample_get_iam_policy():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1362,13 +1594,13 @@ async def sample_get_iam_policy():
async def test_iam_permissions(
self,
- request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1410,9 +1642,8 @@ async def sample_test_iam_permissions():
print(response)
Args:
- request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
- The request object. Request message for
- `TestIamPermissions` method.
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+ The request object. Request message for ``TestIamPermissions`` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1422,7 +1653,7 @@ async def sample_test_iam_permissions():
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (:class:`Sequence[str]`):
+ permissions (:class:`MutableSequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1431,43 +1662,45 @@ async def sample_test_iam_permissions():
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
request = iam_policy_pb2.TestIamPermissionsRequest(
- resource=resource,
- permissions=permissions,
+ resource=resource, permissions=permissions
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.test_iam_permissions,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.test_iam_permissions
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1475,6 +1708,9 @@ async def sample_test_iam_permissions():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1488,14 +1724,14 @@ async def sample_test_iam_permissions():
async def create_backup(
self,
- request: Union[gsad_backup.CreateBackupRequest, dict] = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1537,13 +1773,13 @@ async def sample_create_backup():
print("Waiting for operation to complete...")
- response = await operation.result()
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupRequest, dict]]):
The request object. The request for
[CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
parent (:class:`str`):
@@ -1572,11 +1808,13 @@ async def sample_create_backup():
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1588,16 +1826,22 @@ async def sample_create_backup():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.CreateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.CreateBackupRequest):
+ request = gsad_backup.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1610,11 +1854,9 @@ async def sample_create_backup():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_backup,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1622,6 +1864,9 @@ async def sample_create_backup():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1643,15 +1888,15 @@ async def sample_create_backup():
async def copy_backup(
self,
- request: Union[backup.CopyBackupRequest, dict] = None,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup_id: str = None,
- source_backup: str = None,
- expire_time: timestamp_pb2.Timestamp = None,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Starts copying a Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1664,8 +1909,8 @@ async def copy_backup(
The [response][google.longrunning.Operation.response] field type
is [Backup][google.spanner.admin.database.v1.Backup], if
successful. Cancelling the returned operation will stop the
- copying and delete the backup. Concurrent CopyBackup requests
- can run on the same source backup.
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
.. code-block:: python
@@ -1694,13 +1939,13 @@ async def sample_copy_backup():
print("Waiting for operation to complete...")
- response = await operation.result()
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CopyBackupRequest, dict]]):
The request object. The request for
[CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
parent (:class:`str`):
@@ -1743,11 +1988,13 @@ async def sample_copy_backup():
This corresponds to the ``expire_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -1759,16 +2006,22 @@ async def sample_copy_backup():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup_id, source_backup, expire_time])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.CopyBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.CopyBackupRequest):
+ request = backup.CopyBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1783,11 +2036,9 @@ async def sample_copy_backup():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.copy_backup,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.copy_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1795,6 +2046,9 @@ async def sample_copy_backup():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1816,12 +2070,12 @@ async def sample_copy_backup():
async def get_backup(
self,
- request: Union[backup.GetBackupRequest, dict] = None,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> backup.Backup:
r"""Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -1853,7 +2107,7 @@ async def sample_get_backup():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupRequest, dict]]):
The request object. The request for
[GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
name (:class:`str`):
@@ -1863,27 +2117,35 @@ async def sample_get_backup():
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.GetBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.GetBackupRequest):
+ request = backup.GetBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -1892,21 +2154,9 @@ async def sample_get_backup():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -1914,6 +2164,9 @@ async def sample_get_backup():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1927,13 +2180,13 @@ async def sample_get_backup():
async def update_backup(
self,
- request: Union[gsad_backup.UpdateBackupRequest, dict] = None,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask_pb2.FieldMask = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> gsad_backup.Backup:
r"""Updates a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -1964,7 +2217,7 @@ async def sample_update_backup():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupRequest, dict]]):
The request object. The request for
[UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
backup (:class:`google.cloud.spanner_admin_database_v1.types.Backup`):
@@ -1973,7 +2226,7 @@ async def sample_update_backup():
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1990,27 +2243,35 @@ async def sample_update_backup():
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = gsad_backup.UpdateBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup.UpdateBackupRequest):
+ request = gsad_backup.UpdateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2021,21 +2282,9 @@ async def sample_update_backup():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2045,6 +2294,9 @@ async def sample_update_backup():
),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2058,12 +2310,12 @@ async def sample_update_backup():
async def delete_backup(
self,
- request: Union[backup.DeleteBackupRequest, dict] = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -2092,7 +2344,7 @@ async def sample_delete_backup():
await client.delete_backup(request=request)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupRequest, dict]]):
The request object. The request for
[DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
name (:class:`str`):
@@ -2103,23 +2355,31 @@ async def sample_delete_backup():
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.DeleteBackupRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.DeleteBackupRequest):
+ request = backup.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2128,21 +2388,9 @@ async def sample_delete_backup():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.delete_backup,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2150,6 +2398,9 @@ async def sample_delete_backup():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
request,
@@ -2160,12 +2411,12 @@ async def sample_delete_backup():
async def list_backups(
self,
- request: Union[backup.ListBackupsRequest, dict] = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsAsyncPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
@@ -2199,7 +2450,7 @@ async def sample_list_backups():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupsRequest, dict]]):
The request object. The request for
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
parent (:class:`str`):
@@ -2209,32 +2460,40 @@ async def sample_list_backups():
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupsRequest):
+ request = backup.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2243,21 +2502,9 @@ async def sample_list_backups():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backups,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backups
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2265,6 +2512,9 @@ async def sample_list_backups():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2279,6 +2529,8 @@ async def sample_list_backups():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2287,14 +2539,16 @@ async def sample_list_backups():
async def restore_database(
self,
- request: Union[spanner_database_admin.RestoreDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation_async.AsyncOperation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -2343,13 +2597,13 @@ async def sample_restore_database():
print("Waiting for operation to complete...")
- response = await operation.result()
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.RestoreDatabaseRequest, dict]]):
The request object. The request for
[RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
parent (:class:`str`):
@@ -2380,11 +2634,13 @@ async def sample_restore_database():
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation_async.AsyncOperation:
@@ -2396,16 +2652,22 @@ async def sample_restore_database():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.RestoreDatabaseRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
+ request = spanner_database_admin.RestoreDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2418,11 +2680,9 @@ async def sample_restore_database():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.restore_database,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.restore_database
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2430,6 +2690,9 @@ async def sample_restore_database():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2451,14 +2714,14 @@ async def sample_restore_database():
async def list_database_operations(
self,
- request: Union[
- spanner_database_admin.ListDatabaseOperationsRequest, dict
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsAsyncPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -2499,7 +2762,7 @@ async def sample_list_database_operations():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest, dict]]):
The request object. The request for
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
parent (:class:`str`):
@@ -2510,11 +2773,13 @@ async def sample_list_database_operations():
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsAsyncPager:
@@ -2526,16 +2791,24 @@ async def sample_list_database_operations():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabaseOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.ListDatabaseOperationsRequest
+ ):
+ request = spanner_database_admin.ListDatabaseOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2544,21 +2817,9 @@ async def sample_list_database_operations():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_database_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2566,6 +2827,9 @@ async def sample_list_database_operations():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2580,6 +2844,8 @@ async def sample_list_database_operations():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2588,12 +2854,12 @@ async def sample_list_database_operations():
async def list_backup_operations(
self,
- request: Union[backup.ListBackupOperationsRequest, dict] = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsAsyncPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -2636,7 +2902,7 @@ async def sample_list_backup_operations():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest, dict]]):
The request object. The request for
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
parent (:class:`str`):
@@ -2647,11 +2913,13 @@ async def sample_list_backup_operations():
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager:
@@ -2663,16 +2931,22 @@ async def sample_list_backup_operations():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = backup.ListBackupOperationsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup.ListBackupOperationsRequest):
+ request = backup.ListBackupOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2681,21 +2955,9 @@ async def sample_list_backup_operations():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_backup_operations,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2703,6 +2965,9 @@ async def sample_list_backup_operations():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2717,6 +2982,8 @@ async def sample_list_backup_operations():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2725,12 +2992,14 @@ async def sample_list_backup_operations():
async def list_database_roles(
self,
- request: Union[spanner_database_admin.ListDatabaseRolesRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseRolesAsyncPager:
r"""Lists Cloud Spanner database roles.
@@ -2762,43 +3031,51 @@ async def sample_list_database_roles():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesRequest, dict]]):
The request object. The request for
[ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
parent (:class:`str`):
Required. The database whose roles should be listed.
Values are of the form
-           ``projects/<project>/instances/<instance>/databases/<database>/databaseRoles``.
+           ``projects/<project>/instances/<instance>/databases/<database>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesAsyncPager:
The response for
- [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_database_admin.ListDatabaseRolesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
+ request = spanner_database_admin.ListDatabaseRolesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -2807,21 +3084,9 @@ async def sample_list_database_roles():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_database_roles,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_database_roles
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2829,6 +3094,9 @@ async def sample_list_database_roles():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2843,48 +3111,908 @@ async def sample_list_database_roles():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
- async def list_operations(
+ async def add_split_points(
self,
- request: operations_pb2.ListOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
*,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operations_pb2.ListOperationsResponse:
- r"""Lists operations that match the specified filter in the request.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
- Args:
- request (:class:`~.operations_pb2.ListOperationsRequest`):
- The request object. Request message for
- `ListOperations` method.
- retry (google.api_core.retry.Retry): Designation of what errors,
- if any, should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
- Returns:
- ~.operations_pb2.ListOperationsResponse:
- Response message for ``ListOperations`` method.
- """
- # Create or coerce a protobuf request object.
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = await client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (:class:`str`):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (:class:`MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]`):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points:
+ request.split_points.extend(split_points)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.add_split_points
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = await client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (:class:`str`):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (:class:`str`):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (:class:`str`):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = await client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (:class:`google.cloud.spanner_admin_database_v1.types.BackupSchedule`):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (:class:`str`):
+ Required. The name of the schedule to delete. Values are
+ of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_backup_schedule
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesAsyncPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+                page_result = await client.list_backup_schedules(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (:class:`str`):
+ Required. Database is the parent
+ resource whose backup schedules should
+ be listed. Values are of the form
+                projects/<project>/instances/<instance>/databases/<database>
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesAsyncPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_backup_schedules
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBackupSchedulesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ async def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = await client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (:class:`str`):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.ListOperationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._client._transport.list_operations,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2892,6 +4020,9 @@ async def list_operations(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2905,11 +4036,11 @@ async def list_operations(
async def get_operation(
self,
- request: operations_pb2.GetOperationRequest = None,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
@@ -2917,11 +4048,13 @@ async def get_operation(
request (:class:`~.operations_pb2.GetOperationRequest`):
The request object. Request message for
`GetOperation` method.
- retry (google.api_core.retry.Retry): Designation of what errors,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
@@ -2934,11 +4067,7 @@ async def get_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._client._transport.get_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -2946,6 +4075,9 @@ async def get_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -2959,11 +4091,11 @@ async def get_operation(
async def delete_operation(
self,
- request: operations_pb2.DeleteOperationRequest = None,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a long-running operation.
@@ -2976,11 +4108,13 @@ async def delete_operation(
request (:class:`~.operations_pb2.DeleteOperationRequest`):
The request object. Request message for
`DeleteOperation` method.
- retry (google.api_core.retry.Retry): Designation of what errors,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
None
"""
@@ -2992,11 +4126,7 @@ async def delete_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._client._transport.delete_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self.transport._wrapped_methods[self._client._transport.delete_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3004,6 +4134,9 @@ async def delete_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
request,
@@ -3014,11 +4147,11 @@ async def delete_operation(
async def cancel_operation(
self,
- request: operations_pb2.CancelOperationRequest = None,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
@@ -3030,11 +4163,13 @@ async def cancel_operation(
request (:class:`~.operations_pb2.CancelOperationRequest`):
The request object. Request message for
`CancelOperation` method.
- retry (google.api_core.retry.Retry): Designation of what errors,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
None
"""
@@ -3046,11 +4181,7 @@ async def cancel_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._client._transport.cancel_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3058,6 +4189,9 @@ async def cancel_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
await rpc(
request,
@@ -3066,21 +4200,19 @@ async def cancel_operation(
metadata=metadata,
)
- async def __aenter__(self):
+ async def __aenter__(self) -> "DatabaseAdminAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminAsyncClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index 23635da722..057aa677f8 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,10 +14,28 @@
# limitations under the License.
#
from collections import OrderedDict
+from http import HTTPStatus
+import json
+import logging as std_logging
import os
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+import uuid
+import warnings
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
@@ -28,29 +46,45 @@
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object] # type: ignore
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import common
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DatabaseAdminGrpcTransport
from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .transports.rest import DatabaseAdminRestTransport
class DatabaseAdminClientMeta(type):
@@ -64,10 +98,11 @@ class DatabaseAdminClientMeta(type):
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+ _transport_registry["rest"] = DatabaseAdminRestTransport
def get_transport_class(
cls,
- label: str = None,
+ label: Optional[str] = None,
) -> Type[DatabaseAdminTransport]:
"""Returns an appropriate transport class.
@@ -92,10 +127,10 @@ class DatabaseAdminClient(metaclass=DatabaseAdminClientMeta):
The Cloud Spanner Database Admin API can be used to:
- - create, drop, and list databases
- - update the schema of pre-existing databases
- - create, delete and list backups for a database
- - restore a database from an existing backup
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
"""
@staticmethod
@@ -128,11 +163,43 @@ def _get_default_mtls_endpoint(api_endpoint):
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = "spanner.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
+ _DEFAULT_ENDPOINT_TEMPLATE = "spanner.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert for automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -202,6 +269,30 @@ def parse_backup_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def backup_schedule_path(
+ project: str,
+ instance: str,
+ database: str,
+ schedule: str,
+ ) -> str:
+ """Returns a fully-qualified backup_schedule string."""
+ return "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}".format(
+ project=project,
+ instance=instance,
+ database=database,
+ schedule=schedule,
+ )
+
+ @staticmethod
+ def parse_backup_schedule_path(path: str) -> Dict[str, str]:
+ """Parses a backup_schedule path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/databases/(?P<database>.+?)/backupSchedules/(?P<schedule>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def crypto_key_path(
project: str,
@@ -315,6 +406,28 @@ def parse_instance_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
return m.groupdict() if m else {}
+ @staticmethod
+ def instance_partition_path(
+ project: str,
+ instance: str,
+ instance_partition: str,
+ ) -> str:
+ """Returns a fully-qualified instance_partition string."""
+ return "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}".format(
+ project=project,
+ instance=instance,
+ instance_partition=instance_partition,
+ )
+
+ @staticmethod
+ def parse_instance_partition_path(path: str) -> Dict[str, str]:
+ """Parses an instance_partition path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/instancePartitions/(?P<instance_partition>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def common_billing_account_path(
billing_account: str,
@@ -396,7 +509,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
- """Return the API endpoint and client cert source for mutual TLS.
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
@@ -408,7 +521,7 @@ def get_mtls_endpoint_and_cert_source(
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
- default mTLS endpoint; if the environment variabel is "never", use the default API
+ default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
@@ -426,14 +539,15 @@ def get_mtls_endpoint_and_cert_source(
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
+
+ warnings.warn(
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
+ DeprecationWarning,
+ )
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = DatabaseAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -441,7 +555,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -459,12 +573,173 @@ def get_mtls_endpoint_and_cert_source(
return api_endpoint, client_cert_source
+ @staticmethod
+ def _read_environment_variables():
+ """Returns the environment variables used by the client.
+
+ Returns:
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
+
+ Raises:
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
+ any of ["true", "false"].
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
+ is not any of ["auto", "never", "always"].
+ """
+ use_client_cert = DatabaseAdminClient._use_client_cert_effective()
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
+
+ @staticmethod
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
+ """Return the client cert source to be used by the client.
+
+ Args:
+ provided_cert_source (bytes): The client certificate source provided.
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
+
+ Returns:
+ bytes or None: The client cert source to be used by the client.
+ """
+ client_cert_source = None
+ if use_cert_flag:
+ if provided_cert_source:
+ client_cert_source = provided_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+ return client_cert_source
+
+ @staticmethod
+ def _get_api_endpoint(
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
+ ):
+ """Return the API endpoint used by the client.
+
+ Args:
+ api_override (str): The API endpoint override. If specified, this is always
+ the return value of this function and the other arguments are not used.
+ client_cert_source (bytes): The client certificate source used by the client.
+ universe_domain (str): The universe domain used by the client.
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
+ Possible values are "always", "auto", or "never".
+
+ Returns:
+ str: The API endpoint to be used by the client.
+ """
+ if api_override is not None:
+ api_endpoint = api_override
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ _default_universe = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if universe_domain != _default_universe:
+ raise MutualTLSChannelError(
+ f"mTLS is not supported in any universe other than {_default_universe}."
+ )
+ api_endpoint = DatabaseAdminClient.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = DatabaseAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=universe_domain
+ )
+ return api_endpoint
+
+ @staticmethod
+ def _get_universe_domain(
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
+ ) -> str:
+ """Return the universe domain used by the client.
+
+ Args:
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
+
+ Returns:
+ str: The universe domain to be used by the client.
+
+ Raises:
+ ValueError: If the universe domain is an empty string.
+ """
+ universe_domain = DatabaseAdminClient._DEFAULT_UNIVERSE
+ if client_universe_domain is not None:
+ universe_domain = client_universe_domain
+ elif universe_domain_env is not None:
+ universe_domain = universe_domain_env
+ if len(universe_domain.strip()) == 0:
+ raise ValueError("Universe Domain cannot be an empty string.")
+ return universe_domain
+
+ def _validate_universe_domain(self):
+ """Validates client's and credentials' universe domains are consistent.
+
+ Returns:
+ bool: True iff the configured universe domain is valid.
+
+ Raises:
+ ValueError: If the configured universe domain is not valid.
+ """
+
+ # NOTE (b/349488459): universe validation is disabled until further notice.
+ return True
+
+ def _add_cred_info_for_auth_errors(
+ self, error: core_exceptions.GoogleAPICallError
+ ) -> None:
+ """Adds credential info string to error details for 401/403/404 errors.
+
+ Args:
+ error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info.
+ """
+ if error.code not in [
+ HTTPStatus.UNAUTHORIZED,
+ HTTPStatus.FORBIDDEN,
+ HTTPStatus.NOT_FOUND,
+ ]:
+ return
+
+ cred = self._transport._credentials
+
+ # get_cred_info is only available in google-auth>=2.35.0
+ if not hasattr(cred, "get_cred_info"):
+ return
+
+ # ignore the type check since pypy test fails when get_cred_info
+ # is not available
+ cred_info = cred.get_cred_info() # type: ignore
+ if cred_info and hasattr(error._details, "append"):
+ error._details.append(json.dumps(cred_info))
+
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance.
+ """
+ return self._universe_domain
+
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, DatabaseAdminTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
+ transport: Optional[
+ Union[str, DatabaseAdminTransport, Callable[..., DatabaseAdminTransport]]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the database admin client.
@@ -475,25 +750,37 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, DatabaseAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
- client. It won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,DatabaseAdminTransport,Callable[..., DatabaseAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the DatabaseAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which can have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that the ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
@@ -504,16 +791,38 @@ def __init__(
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
- if isinstance(client_options, dict):
- client_options = client_options_lib.from_dict(client_options)
- if client_options is None:
- client_options = client_options_lib.ClientOptions()
+ self._client_options = client_options
+ if isinstance(self._client_options, dict):
+ self._client_options = client_options_lib.from_dict(self._client_options)
+ if self._client_options is None:
+ self._client_options = client_options_lib.ClientOptions()
+ self._client_options = cast(
+ client_options_lib.ClientOptions, self._client_options
+ )
- api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
- client_options
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
+
+ (
+ self._use_client_cert,
+ self._use_mtls_endpoint,
+ self._universe_domain_env,
+ ) = DatabaseAdminClient._read_environment_variables()
+ self._client_cert_source = DatabaseAdminClient._get_client_cert_source(
+ self._client_options.client_cert_source, self._use_client_cert
+ )
+ self._universe_domain = DatabaseAdminClient._get_universe_domain(
+ universe_domain_opt, self._universe_domain_env
)
+ self._api_endpoint = None # updated below, depending on `transport`
- api_key_value = getattr(client_options, "api_key", None)
+ # Initialize the universe domain validation.
+ self._is_universe_domain_valid = False
+
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
+ # Setup logging.
+ client_logging.initialize_logging()
+
+ api_key_value = getattr(self._client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
@@ -522,20 +831,33 @@ def __init__(
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
- if isinstance(transport, DatabaseAdminTransport):
+ transport_provided = isinstance(transport, DatabaseAdminTransport)
+ if transport_provided:
# transport is a DatabaseAdminTransport instance.
- if credentials or client_options.credentials_file or api_key_value:
+ if credentials or self._client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
- if client_options.scopes:
+ if self._client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
- self._transport = transport
- else:
+ self._transport = cast(DatabaseAdminTransport, transport)
+ self._api_endpoint = self._transport.host
+
+ self._api_endpoint = (
+ self._api_endpoint
+ or DatabaseAdminClient._get_api_endpoint(
+ self._client_options.api_endpoint,
+ self._client_cert_source,
+ self._universe_domain,
+ self._use_mtls_endpoint,
+ )
+ )
+
+ if not transport_provided:
import google.auth._default # type: ignore
if api_key_value and hasattr(
@@ -545,27 +867,59 @@ def __init__(
api_key_value
)
- Transport = type(self).get_transport_class(transport)
- self._transport = Transport(
+ transport_init: Union[
+ Type[DatabaseAdminTransport], Callable[..., DatabaseAdminTransport]
+ ] = (
+ DatabaseAdminClient.get_transport_class(transport)
+ if isinstance(transport, str) or transport is None
+ else cast(Callable[..., DatabaseAdminTransport], transport)
+ )
+ # initialize with the provided callable or the passed in class
+ self._transport = transport_init(
credentials=credentials,
- credentials_file=client_options.credentials_file,
- host=api_endpoint,
- scopes=client_options.scopes,
- client_cert_source_for_mtls=client_cert_source_func,
- quota_project_id=client_options.quota_project_id,
+ credentials_file=self._client_options.credentials_file,
+ host=self._api_endpoint,
+ scopes=self._client_options.scopes,
+ client_cert_source_for_mtls=self._client_cert_source,
+ quota_project_id=self._client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
- api_audience=client_options.api_audience,
+ api_audience=self._client_options.api_audience,
)
+ if "async" not in str(self._transport):
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.database_v1.DatabaseAdminClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "universeDomain": getattr(
+ self._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "credentialsType": None,
+ },
+ )
+
def list_databases(
self,
- request: Union[spanner_database_admin.ListDatabasesRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabasesRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabasesPager:
r"""Lists Cloud Spanner databases.
@@ -611,32 +965,35 @@ def sample_list_databases():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager:
The response for
- [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabasesRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.ListDatabasesRequest):
request = spanner_database_admin.ListDatabasesRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -654,6 +1011,9 @@ def sample_list_databases():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -668,6 +1028,8 @@ def sample_list_databases():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -676,13 +1038,15 @@ def sample_list_databases():
def create_database(
self,
- request: Union[spanner_database_admin.CreateDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.CreateDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- create_statement: str = None,
+ parent: Optional[str] = None,
+ create_statement: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
@@ -753,8 +1117,10 @@ def sample_create_database():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -766,19 +1132,20 @@ def sample_create_database():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, create_statement])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, create_statement]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.CreateDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.CreateDatabaseRequest):
request = spanner_database_admin.CreateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -798,6 +1165,9 @@ def sample_create_database():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -819,12 +1189,14 @@ def sample_create_database():
def get_database(
self,
- request: Union[spanner_database_admin.GetDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseRequest, dict]
+ ] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.Database:
r"""Gets the state of a Cloud Spanner database.
@@ -869,27 +1241,30 @@ def sample_get_database():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Database:
A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseRequest):
request = spanner_database_admin.GetDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -907,6 +1282,9 @@ def sample_get_database():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -918,25 +1296,56 @@ def sample_get_database():
# Done; return the response.
return response
- def update_database_ddl(
+ def update_database(
self,
- request: Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseRequest, dict]
+ ] = None,
*,
- database: str = None,
- statements: Sequence[str] = None,
+ database: Optional[spanner_database_admin.Database] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
- r"""Updates the schema of a Cloud Spanner database by
- creating/altering/dropping tables, columns, indexes, etc. The
- returned [long-running operation][google.longrunning.Operation]
- will have a name of the format
- ``<database_name>/operations/<operation_id>`` and can be used to
- track execution of the schema change(s). The
+ r"""Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
[metadata][google.longrunning.Operation.metadata] field type is
- [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- The operation has no response.
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
.. code-block:: python
@@ -949,18 +1358,20 @@ def update_database_ddl(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_database_v1
- def sample_update_database_ddl():
+ def sample_update_database():
# Create a client
client = spanner_admin_database_v1.DatabaseAdminClient()
# Initialize request argument(s)
- request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
- database="database_value",
- statements=['statements_value1', 'statements_value2'],
+ database = spanner_admin_database_v1.Database()
+ database.name = "name_value"
+
+ request = spanner_admin_database_v1.UpdateDatabaseRequest(
+ database=database,
)
# Make the request
- operation = client.update_database_ddl(request=request)
+ operation = client.update_database(request=request)
print("Waiting for operation to complete...")
@@ -970,93 +1381,80 @@ def sample_update_database_ddl():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]):
- The request object. Enqueues the given DDL statements to
- be applied, in order but not necessarily all at once, to
- the database schema at some point (or points) in the
- future. The server checks that the statements are
- executable (syntactically valid, name tables that exist,
- etc.) before enqueueing them, but they may still fail
- upon
- later execution (e.g., if a statement from another batch
- of statements is applied first and it conflicts in some
- way, or if there is some data-related problem like a
- `NULL` value in a column to which `NOT NULL` would be
- added). If a statement fails, all subsequent statements
- in the batch are automatically cancelled.
- Each batch of statements is assigned a name which can be
- used with the
- [Operations][google.longrunning.Operations] API to
- monitor progress. See the
- [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
- field for more details.
- database (str):
- Required. The database to update.
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest, dict]):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ database (google.cloud.spanner_admin_database_v1.types.Database):
+ Required. The database to update. The ``name`` field of
+ the database is of the form
+ ``projects/<project>/instances/<instance>/databases/<database>``.
+
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- statements (Sequence[str]):
- Required. DDL statements to be
- applied to the database.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
- This corresponds to the ``statements`` field
+ This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
- The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
- empty messages in your APIs. A typical example is to
- use it as the request or the response type of an API
- method. For instance:
-
- service Foo {
- rpc Bar(google.protobuf.Empty) returns
- (google.protobuf.Empty);
-
- }
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_database_v1.types.Database`
+ A Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database, statements])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.UpdateDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
- request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseRequest):
+ request = spanner_database_admin.UpdateDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if database is not None:
request.database = database
- if statements is not None:
- request.statements = statements
+ if update_mask is not None:
+ request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.update_database_ddl]
+ rpc = self._transport._wrapped_methods[self._transport.update_database]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("database.name", request.database.name),)
+ ),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1069,26 +1467,34 @@ def sample_update_database_ddl():
response = operation.from_gapic(
response,
self._transport.operations_client,
- empty_pb2.Empty,
- metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata,
+ spanner_database_admin.Database,
+ metadata_type=spanner_database_admin.UpdateDatabaseMetadata,
)
# Done; return the response.
return response
- def drop_database(
+ def update_database_ddl(
self,
- request: Union[spanner_database_admin.DropDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.UpdateDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
+ database: Optional[str] = None,
+ statements: Optional[MutableSequence[str]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> None:
- r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
- for the database will be retained according to their
- ``expire_time``. Note: Cloud Spanner might continue to accept
- requests for a few seconds after the database has been deleted.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Updates the schema of a Cloud Spanner database by
+ creating/altering/dropping tables, columns, indexes, etc. The
+ returned [long-running operation][google.longrunning.Operation]
+ will have a name of the format
+ ``<database_name>/operations/<operation_id>`` and can be used to
+ track execution of the schema change(s). The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ The operation has no response.
.. code-block:: python
@@ -1101,47 +1507,210 @@ def drop_database(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_database_v1
- def sample_drop_database():
+ def sample_update_database_ddl():
# Create a client
client = spanner_admin_database_v1.DatabaseAdminClient()
# Initialize request argument(s)
- request = spanner_admin_database_v1.DropDatabaseRequest(
+ request = spanner_admin_database_v1.UpdateDatabaseDdlRequest(
database="database_value",
+ statements=['statements_value1', 'statements_value2'],
)
# Make the request
- client.drop_database(request=request)
+ operation = client.update_database_ddl(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
Args:
- request (Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]):
- The request object. The request for
- [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
- database (str):
- Required. The database to be dropped.
- This corresponds to the ``database`` field
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateDatabaseDdlRequest, dict]):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
+ later execution (e.g., if a statement from another batch
+ of statements is applied first and it conflicts in some
+ way, or if there is some data-related problem like a
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
+ Each batch of statements is assigned a name which can be
+ used with the
+ [Operations][google.longrunning.Operations] API to
+ monitor progress. See the
+ [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+ field for more details.
+ database (str):
+ Required. The database to update.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ statements (MutableSequence[str]):
+ Required. DDL statements to be
+ applied to the database.
+
+ This corresponds to the ``statements`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, statements]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.UpdateDatabaseDdlRequest):
+ request = spanner_database_admin.UpdateDatabaseDdlRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if statements is not None:
+ request.statements = statements
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_database_ddl]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=spanner_database_admin.UpdateDatabaseDdlMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def drop_database(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.DropDatabaseRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Drops (aka deletes) a Cloud Spanner database. Completed backups
+ for the database will be retained according to their
+ ``expire_time``. Note: Cloud Spanner might continue to accept
+ requests for a few seconds after the database has been deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_drop_database():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DropDatabaseRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ client.drop_database(request=request)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.DropDatabaseRequest, dict]):
+ The request object. The request for
+ [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+ database (str):
+ Required. The database to be dropped.
+ This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.DropDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.DropDatabaseRequest):
request = spanner_database_admin.DropDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1159,6 +1728,9 @@ def sample_drop_database():
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
request,
@@ -1169,12 +1741,14 @@ def sample_drop_database():
def get_database_ddl(
self,
- request: Union[spanner_database_admin.GetDatabaseDdlRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.GetDatabaseDdlRequest, dict]
+ ] = None,
*,
- database: str = None,
+ database: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_database_admin.GetDatabaseDdlResponse:
r"""Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
@@ -1222,29 +1796,32 @@ def sample_get_database_ddl():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse:
The response for
- [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([database])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.GetDatabaseDdlRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.GetDatabaseDdlRequest):
request = spanner_database_admin.GetDatabaseDdlRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1262,6 +1839,9 @@ def sample_get_database_ddl():
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1275,12 +1855,12 @@ def sample_get_database_ddl():
def set_iam_policy(
self,
- request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
+ resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a database or backup resource.
Replaces any existing policy.
@@ -1321,8 +1901,7 @@ def sample_set_iam_policy():
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
- The request object. Request message for `SetIamPolicy`
- method.
+ The request object. Request message for ``SetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being specified. See the
@@ -1335,8 +1914,10 @@ def sample_set_iam_policy():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
@@ -1357,56 +1938,28 @@ def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1414,8 +1967,8 @@ def sample_set_iam_policy():
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
@@ -1433,6 +1986,9 @@ def sample_set_iam_policy():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1446,12 +2002,12 @@ def sample_set_iam_policy():
def get_iam_policy(
self,
- request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
- resource: str = None,
+ resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
@@ -1493,8 +2049,7 @@ def sample_get_iam_policy():
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
- The request object. Request message for `GetIamPolicy`
- method.
+ The request object. Request message for ``GetIamPolicy`` method.
resource (str):
REQUIRED: The resource for which the
policy is being requested. See the
@@ -1507,8 +2062,10 @@ def sample_get_iam_policy():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.policy_pb2.Policy:
@@ -1529,56 +2086,28 @@ def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1586,8 +2115,8 @@ def sample_get_iam_policy():
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
@@ -1605,6 +2134,9 @@ def sample_get_iam_policy():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1618,13 +2150,13 @@ def sample_get_iam_policy():
def test_iam_permissions(
self,
- request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
- resource: str = None,
- permissions: Sequence[str] = None,
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the specified
database or backup resource.
@@ -1667,8 +2199,7 @@ def sample_test_iam_permissions():
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
- The request object. Request message for
- `TestIamPermissions` method.
+ The request object. Request message for ``TestIamPermissions`` method.
resource (str):
REQUIRED: The resource for which the
policy detail is being requested. See
@@ -1678,7 +2209,7 @@ def sample_test_iam_permissions():
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- permissions (Sequence[str]):
+ permissions (MutableSequence[str]):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
@@ -1690,17 +2221,22 @@ def sample_test_iam_permissions():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource, permissions])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1708,8 +2244,8 @@ def sample_test_iam_permissions():
)
if isinstance(request, dict):
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
# Null request, just make one.
@@ -1729,6 +2265,9 @@ def sample_test_iam_permissions():
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1742,14 +2281,14 @@ def sample_test_iam_permissions():
def create_backup(
self,
- request: Union[gsad_backup.CreateBackupRequest, dict] = None,
+ request: Optional[Union[gsad_backup.CreateBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup: gsad_backup.Backup = None,
- backup_id: str = None,
+ parent: Optional[str] = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ backup_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1829,8 +2368,10 @@ def sample_create_backup():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -1842,19 +2383,20 @@ def sample_create_backup():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup, backup_id])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup, backup_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.CreateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.CreateBackupRequest):
request = gsad_backup.CreateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -1876,6 +2418,9 @@ def sample_create_backup():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -1897,15 +2442,15 @@ def sample_create_backup():
def copy_backup(
self,
- request: Union[backup.CopyBackupRequest, dict] = None,
+ request: Optional[Union[backup.CopyBackupRequest, dict]] = None,
*,
- parent: str = None,
- backup_id: str = None,
- source_backup: str = None,
- expire_time: timestamp_pb2.Timestamp = None,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Starts copying a Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
@@ -1918,8 +2463,8 @@ def copy_backup(
The [response][google.longrunning.Operation.response] field type
is [Backup][google.spanner.admin.database.v1.Backup], if
successful. Cancelling the returned operation will stop the
- copying and delete the backup. Concurrent CopyBackup requests
- can run on the same source backup.
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
.. code-block:: python
@@ -2000,8 +2545,10 @@ def sample_copy_backup():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -2013,19 +2560,20 @@ def sample_copy_backup():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup_id, source_backup, expire_time])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_id, source_backup, expire_time]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.CopyBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.CopyBackupRequest):
request = backup.CopyBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2049,6 +2597,9 @@ def sample_copy_backup():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2070,12 +2621,12 @@ def sample_copy_backup():
def get_backup(
self,
- request: Union[backup.GetBackupRequest, dict] = None,
+ request: Optional[Union[backup.GetBackupRequest, dict]] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> backup.Backup:
r"""Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -2120,27 +2671,30 @@ def sample_get_backup():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.GetBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.GetBackupRequest):
request = backup.GetBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2158,6 +2712,9 @@ def sample_get_backup():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2171,13 +2728,13 @@ def sample_get_backup():
def update_backup(
self,
- request: Union[gsad_backup.UpdateBackupRequest, dict] = None,
+ request: Optional[Union[gsad_backup.UpdateBackupRequest, dict]] = None,
*,
- backup: gsad_backup.Backup = None,
- update_mask: field_mask_pb2.FieldMask = None,
+ backup: Optional[gsad_backup.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> gsad_backup.Backup:
r"""Updates a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -2217,7 +2774,7 @@ def sample_update_backup():
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2237,27 +2794,30 @@ def sample_update_backup():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.types.Backup:
A backup of a Cloud Spanner database.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a gsad_backup.UpdateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, gsad_backup.UpdateBackupRequest):
request = gsad_backup.UpdateBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2279,6 +2839,9 @@ def sample_update_backup():
),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2292,12 +2855,12 @@ def sample_update_backup():
def delete_backup(
self,
- request: Union[backup.DeleteBackupRequest, dict] = None,
+ request: Optional[Union[backup.DeleteBackupRequest, dict]] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
@@ -2340,23 +2903,26 @@ def sample_delete_backup():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.DeleteBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.DeleteBackupRequest):
request = backup.DeleteBackupRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2374,6 +2940,9 @@ def sample_delete_backup():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
request,
@@ -2384,12 +2953,12 @@ def sample_delete_backup():
def list_backups(
self,
- request: Union[backup.ListBackupsRequest, dict] = None,
+ request: Optional[Union[backup.ListBackupsRequest, dict]] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupsPager:
r"""Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
@@ -2436,32 +3005,35 @@ def sample_list_backups():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager:
The response for
- [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupsRequest):
request = backup.ListBackupsRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2479,6 +3051,9 @@ def sample_list_backups():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2493,6 +3068,8 @@ def sample_list_backups():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2501,14 +3078,16 @@ def sample_list_backups():
def restore_database(
self,
- request: Union[spanner_database_admin.RestoreDatabaseRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.RestoreDatabaseRequest, dict]
+ ] = None,
*,
- parent: str = None,
- database_id: str = None,
- backup: str = None,
+ parent: Optional[str] = None,
+ database_id: Optional[str] = None,
+ backup: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operation.Operation:
r"""Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
@@ -2597,8 +3176,10 @@ def sample_restore_database():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.api_core.operation.Operation:
@@ -2610,19 +3191,20 @@ def sample_restore_database():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, database_id, backup])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, database_id, backup]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.RestoreDatabaseRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.RestoreDatabaseRequest):
request = spanner_database_admin.RestoreDatabaseRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2644,6 +3226,9 @@ def sample_restore_database():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2665,14 +3250,14 @@ def sample_restore_database():
def list_database_operations(
self,
- request: Union[
- spanner_database_admin.ListDatabaseOperationsRequest, dict
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseOperationsRequest, dict]
] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseOperationsPager:
r"""Lists database
[longrunning-operations][google.longrunning.Operation]. A
@@ -2727,8 +3312,10 @@ def sample_list_database_operations():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsPager:
@@ -2740,19 +3327,20 @@ def sample_list_database_operations():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabaseOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(
request, spanner_database_admin.ListDatabaseOperationsRequest
):
@@ -2772,6 +3360,9 @@ def sample_list_database_operations():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2786,6 +3377,8 @@ def sample_list_database_operations():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2794,12 +3387,12 @@ def sample_list_database_operations():
def list_backup_operations(
self,
- request: Union[backup.ListBackupOperationsRequest, dict] = None,
+ request: Optional[Union[backup.ListBackupOperationsRequest, dict]] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListBackupOperationsPager:
r"""Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
@@ -2856,8 +3449,10 @@ def sample_list_backup_operations():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsPager:
@@ -2869,19 +3464,20 @@ def sample_list_backup_operations():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a backup.ListBackupOperationsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, backup.ListBackupOperationsRequest):
request = backup.ListBackupOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -2899,6 +3495,9 @@ def sample_list_backup_operations():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -2913,6 +3512,8 @@ def sample_list_backup_operations():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -2921,12 +3522,14 @@ def sample_list_backup_operations():
def list_database_roles(
self,
- request: Union[spanner_database_admin.ListDatabaseRolesRequest, dict] = None,
+ request: Optional[
+ Union[spanner_database_admin.ListDatabaseRolesRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListDatabaseRolesPager:
r"""Lists Cloud Spanner database roles.
@@ -2964,7 +3567,7 @@ def sample_list_database_roles():
parent (str):
Required. The database whose roles should be listed.
Values are of the form
- ``projects//instances//databases//databaseRoles``.
+ ``projects//instances//databases/``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2972,32 +3575,35 @@ def sample_list_database_roles():
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesPager:
The response for
- [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # Minor optimization to avoid making a copy if the user passes
- # in a spanner_database_admin.ListDatabaseRolesRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
if not isinstance(request, spanner_database_admin.ListDatabaseRolesRequest):
request = spanner_database_admin.ListDatabaseRolesRequest(request)
# If we have keyword arguments corresponding to fields on the
@@ -3015,6 +3621,9 @@ def sample_list_database_roles():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -3029,61 +3638,591 @@ def sample_list_database_roles():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- """Releases underlying transport's resources.
-
- .. warning::
- ONLY use as a context manager if the transport is NOT shared
- with other clients! Exiting the with block will CLOSE the transport
- and may cause errors in other clients!
- """
- self.transport.close()
-
- def list_operations(
+ def add_split_points(
self,
- request: operations_pb2.ListOperationsRequest = None,
+ request: Optional[
+ Union[spanner_database_admin.AddSplitPointsRequest, dict]
+ ] = None,
*,
+ database: Optional[str] = None,
+ split_points: Optional[
+ MutableSequence[spanner_database_admin.SplitPoints]
+ ] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operations_pb2.ListOperationsResponse:
- r"""Lists operations that match the specified filter in the request.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ r"""Adds split points to specified tables, indexes of a
+ database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_add_split_points():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.AddSplitPointsRequest(
+ database="database_value",
+ )
+
+ # Make the request
+ response = client.add_split_points(request=request)
+
+ # Handle the response
+ print(response)
Args:
- request (:class:`~.operations_pb2.ListOperationsRequest`):
- The request object. Request message for
- `ListOperations` method.
- retry (google.api_core.retry.Retry): Designation of what errors,
- if any, should be retried.
+ request (Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]):
+ The request object. The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+ database (str):
+ Required. The database on whose tables/indexes split
+ points are to be added. Values are of the form
+            ``projects/<project>/instances/<instance>/databases/<database>``.
+
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]):
+ Required. The split points to add.
+ This corresponds to the ``split_points`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
Returns:
- ~.operations_pb2.ListOperationsResponse:
- Response message for ``ListOperations`` method.
+ google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse:
+ The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
"""
# Create or coerce a protobuf request object.
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- if isinstance(request, dict):
- request = operations_pb2.ListOperationsRequest(**request)
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, split_points]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest):
+ request = spanner_database_admin.AddSplitPointsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if split_points is not None:
+ request.split_points = split_points
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.list_operations,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
+ rpc = self._transport._wrapped_methods[self._transport.add_split_points]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def create_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.CreateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ backup_schedule_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Creates a new backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_create_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.CreateBackupScheduleRequest(
+ parent="parent_value",
+ backup_schedule_id="backup_schedule_id_value",
+ )
+
+ # Make the request
+ response = client.create_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.CreateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+ parent (str):
+ Required. The name of the database
+ that this backup schedule applies to.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to
+ create.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_schedule_id (str):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the
+ full backup schedule name of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``backup_schedule_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, backup_schedule, backup_schedule_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.CreateBackupScheduleRequest):
+ request = gsad_backup_schedule.CreateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if backup_schedule_id is not None:
+ request.backup_schedule_id = backup_schedule_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_backup_schedule(
+ self,
+ request: Optional[Union[backup_schedule.GetBackupScheduleRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Gets backup schedule for the input schedule name.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_get_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.GetBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.GetBackupScheduleRequest, dict]):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ name (str):
+ Required. The name of the schedule to retrieve. Values
+ are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.GetBackupScheduleRequest):
+ request = backup_schedule.GetBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup_schedule(
+ self,
+ request: Optional[
+ Union[gsad_backup_schedule.UpdateBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ backup_schedule: Optional[gsad_backup_schedule.BackupSchedule] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Updates a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_update_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
+ )
+
+ # Make the request
+ response = client.update_backup_schedule(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.UpdateBackupScheduleRequest, dict]):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated
+ as specified by ``update_mask`` are required. Other
+ fields are ignored.
+
+ This corresponds to the ``backup_schedule`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which
+ fields in the BackupSchedule resource
+ should be updated. This mask is relative
+ to the BackupSchedule resource, not to
+ the request message. The field mask must
+ always be specified; this prevents any
+ future fields from being erased
+ accidentally.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup_schedule, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, gsad_backup_schedule.UpdateBackupScheduleRequest):
+ request = gsad_backup_schedule.UpdateBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup_schedule is not None:
+ request.backup_schedule = backup_schedule
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_backup_schedule]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("backup_schedule.name", request.backup_schedule.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_backup_schedule(
+ self,
+ request: Optional[
+ Union[backup_schedule.DeleteBackupScheduleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a backup schedule.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_delete_backup_schedule():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.DeleteBackupScheduleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup_schedule(request=request)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.DeleteBackupScheduleRequest, dict]):
+ The request object. The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+ name (str):
+ Required. The name of the schedule to delete. Values are
+ of the form
+                ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
)
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.DeleteBackupScheduleRequest):
+ request = backup_schedule.DeleteBackupScheduleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_backup_schedule]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3091,6 +4230,252 @@ def list_operations(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def list_backup_schedules(
+ self,
+ request: Optional[
+ Union[backup_schedule.ListBackupSchedulesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListBackupSchedulesPager:
+ r"""Lists all the backup schedules for the database.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_list_backup_schedules():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.ListBackupSchedulesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backup_schedules(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest, dict]):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ parent (str):
+ Required. Database is the parent
+ resource whose backup schedules should
+ be listed. Values are of the form
+                projects/<project>/instances/<instance>/databases/<database>
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesPager:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, backup_schedule.ListBackupSchedulesRequest):
+ request = backup_schedule.ListBackupSchedulesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_backup_schedules]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListBackupSchedulesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def internal_update_graph_operation(
+ self,
+ request: Optional[
+ Union[spanner_database_admin.InternalUpdateGraphOperationRequest, dict]
+ ] = None,
+ *,
+ database: Optional[str] = None,
+ operation_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ r"""This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_database_v1
+
+ def sample_internal_update_graph_operation():
+ # Create a client
+ client = spanner_admin_database_v1.DatabaseAdminClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_database_v1.InternalUpdateGraphOperationRequest(
+ database="database_value",
+ operation_id="operation_id_value",
+ vm_identity_token="vm_identity_token_value",
+ )
+
+ # Make the request
+ response = client.internal_update_graph_operation(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationRequest, dict]):
+ The request object. Internal request proto, do not use
+ directly.
+ database (str):
+ Internal field, do not use directly.
+ This corresponds to the ``database`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ operation_id (str):
+ Internal field, do not use directly.
+ This corresponds to the ``operation_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_database_v1.types.InternalUpdateGraphOperationResponse:
+ Internal response proto, do not use
+ directly.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [database, operation_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_database_admin.InternalUpdateGraphOperationRequest
+ ):
+ request = spanner_database_admin.InternalUpdateGraphOperationRequest(
+ request
+ )
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if database is not None:
+ request.database = database
+ if operation_id is not None:
+ request.operation_id = operation_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.internal_update_graph_operation
+ ]
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
response = rpc(
request,
@@ -3102,13 +4487,85 @@ def list_operations(
# Done; return the response.
return response
+ def __enter__(self) -> "DatabaseAdminClient":
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
+ def list_operations(
+ self,
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Lists operations that match the specified filter in the request.
+
+ Args:
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
+ The request object. Request message for
+ `ListOperations` method.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ Returns:
+ ~.operations_pb2.ListOperationsResponse:
+ Response message for ``ListOperations`` method.
+ """
+ # Create or coerce a protobuf request object.
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = operations_pb2.ListOperationsRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
+
def get_operation(
self,
- request: operations_pb2.GetOperationRequest = None,
+ request: Optional[operations_pb2.GetOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
@@ -3119,8 +4576,10 @@ def get_operation(
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
@@ -3133,11 +4592,7 @@ def get_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.get_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3145,24 +4600,31 @@ def get_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
+ # Validate the universe domain.
+ self._validate_universe_domain()
- # Done; return the response.
- return response
+ try:
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+ except core_exceptions.GoogleAPICallError as e:
+ self._add_cred_info_for_auth_errors(e)
+ raise e
def delete_operation(
self,
- request: operations_pb2.DeleteOperationRequest = None,
+ request: Optional[operations_pb2.DeleteOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Deletes a long-running operation.
@@ -3178,8 +4640,10 @@ def delete_operation(
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
None
"""
@@ -3191,11 +4655,7 @@ def delete_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.delete_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._transport._wrapped_methods[self._transport.delete_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3203,6 +4663,9 @@ def delete_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
request,
@@ -3213,11 +4676,11 @@ def delete_operation(
def cancel_operation(
self,
- request: operations_pb2.CancelOperationRequest = None,
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
@@ -3232,8 +4695,10 @@ def cancel_operation(
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
None
"""
@@ -3245,11 +4710,7 @@ def cancel_operation(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.cancel_operation,
- default_timeout=None,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._transport._wrapped_methods[self._transport.cancel_operation]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -3257,6 +4718,9 @@ def cancel_operation(
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
# Send the request.
rpc(
request,
@@ -3266,14 +4730,11 @@ def cancel_operation(
)
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
__all__ = ("DatabaseAdminClient",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
index 6faa0f5d66..c9e2e14d52 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import retry_async as retries_async
from typing import (
Any,
AsyncIterator,
@@ -22,9 +25,20 @@
Tuple,
Optional,
Iterator,
+ Union,
)
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+ OptionalAsyncRetry = Union[
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
+ ]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
+
from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.longrunning import operations_pb2 # type: ignore
@@ -53,7 +67,9 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -64,12 +80,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -80,7 +103,12 @@ def pages(self) -> Iterator[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __iter__(self) -> Iterator[spanner_database_admin.Database]:
@@ -115,7 +143,9 @@ def __init__(
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -126,12 +156,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -144,7 +181,12 @@ async def pages(
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __aiter__(self) -> AsyncIterator[spanner_database_admin.Database]:
@@ -183,7 +225,9 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -194,12 +238,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -210,7 +261,12 @@ def pages(self) -> Iterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __iter__(self) -> Iterator[backup.Backup]:
@@ -245,7 +301,9 @@ def __init__(
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -256,12 +314,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -272,7 +337,12 @@ async def pages(self) -> AsyncIterator[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __aiter__(self) -> AsyncIterator[backup.Backup]:
@@ -311,7 +381,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -322,12 +394,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -338,7 +417,12 @@ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseOperationsRespons
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __iter__(self) -> Iterator[operations_pb2.Operation]:
@@ -375,7 +459,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -386,12 +472,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -404,7 +497,12 @@ async def pages(
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
@@ -443,7 +541,9 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -454,12 +554,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -470,7 +577,12 @@ def pages(self) -> Iterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __iter__(self) -> Iterator[operations_pb2.Operation]:
@@ -505,7 +617,9 @@ def __init__(
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -516,12 +630,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -532,7 +653,12 @@ async def pages(self) -> AsyncIterator[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __aiter__(self) -> AsyncIterator[operations_pb2.Operation]:
@@ -571,7 +697,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseRolesRequest,
response: spanner_database_admin.ListDatabaseRolesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiate the pager.
@@ -582,12 +710,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -598,7 +733,12 @@ def pages(self) -> Iterator[spanner_database_admin.ListDatabaseRolesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request, metadata=self._metadata)
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __iter__(self) -> Iterator[spanner_database_admin.DatabaseRole]:
@@ -635,7 +775,9 @@ def __init__(
request: spanner_database_admin.ListDatabaseRolesRequest,
response: spanner_database_admin.ListDatabaseRolesResponse,
*,
- metadata: Sequence[Tuple[str, str]] = ()
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
):
"""Instantiates the pager.
@@ -646,12 +788,19 @@ def __init__(
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseRolesResponse):
The initial response object.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseRolesRequest(request)
self._response = response
+ self._retry = retry
+ self._timeout = timeout
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
@@ -664,7 +813,12 @@ async def pages(
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request, metadata=self._metadata)
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
yield self._response
def __aiter__(self) -> AsyncIterator[spanner_database_admin.DatabaseRole]:
@@ -677,3 +831,159 @@ async def async_generator():
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., backup_schedule.ListBackupSchedulesResponse],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[backup_schedule.BackupSchedule]:
+ for page in self.pages:
+ yield from page.backup_schedules
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBackupSchedulesAsyncPager:
+ """A pager for iterating through ``list_backup_schedules`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``backup_schedules`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListBackupSchedules`` requests and continue to iterate
+ through the ``backup_schedules`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[backup_schedule.ListBackupSchedulesResponse]],
+ request: backup_schedule.ListBackupSchedulesRequest,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesRequest):
+ The initial request object.
+ response (google.cloud.spanner_admin_database_v1.types.ListBackupSchedulesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = backup_schedule.ListBackupSchedulesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterator[backup_schedule.ListBackupSchedulesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[backup_schedule.BackupSchedule]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.backup_schedules:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
new file mode 100644
index 0000000000..f70c023a98
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/README.rst
@@ -0,0 +1,9 @@
+
+transport inheritance structure
+_______________________________
+
+`DatabaseAdminTransport` is the ABC for all transports.
+- public child `DatabaseAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`).
+- public child `DatabaseAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`).
+- private child `_BaseDatabaseAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`).
+- public child `DatabaseAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`).
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
index 8b203ec615..23ba04ea21 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,15 +19,20 @@
from .base import DatabaseAdminTransport
from .grpc import DatabaseAdminGrpcTransport
from .grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport
+from .rest import DatabaseAdminRestTransport
+from .rest import DatabaseAdminRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[DatabaseAdminTransport]]
_transport_registry["grpc"] = DatabaseAdminGrpcTransport
_transport_registry["grpc_asyncio"] = DatabaseAdminGrpcAsyncIOTransport
+_transport_registry["rest"] = DatabaseAdminRestTransport
__all__ = (
"DatabaseAdminTransport",
"DatabaseAdminGrpcTransport",
"DatabaseAdminGrpcAsyncIOTransport",
+ "DatabaseAdminRestTransport",
+ "DatabaseAdminRestInterceptor",
)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
index 26ac640940..16a075d983 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,8 @@
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-import pkg_resources
+
+from google.cloud.spanner_admin_database_v1 import gapic_version as package_version
import google.auth # type: ignore
import google.api_core
@@ -25,24 +26,26 @@
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
-try:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution(
- "google-cloud-spanner-admin-database",
- ).version,
- )
-except pkg_resources.DistributionNotFound:
- DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=package_version.__version__
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
class DatabaseAdminTransport(abc.ABC):
@@ -59,7 +62,7 @@ def __init__(
self,
*,
host: str = DEFAULT_HOST,
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -72,15 +75,16 @@ def __init__(
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
@@ -97,6 +101,8 @@ def __init__(
# Save the scopes.
self._scopes = scopes
+ if not hasattr(self, "_ignore_credentials"):
+ self._ignore_credentials: bool = False
# If no credentials are provided, then determine the appropriate
# defaults.
@@ -109,7 +115,7 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
- elif credentials is None:
+ elif credentials is None and not self._ignore_credentials:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
@@ -135,6 +141,10 @@ def __init__(
host += ":443"
self._host = host
+ @property
+ def host(self):
+ return self._host
+
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -173,6 +183,21 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=3600.0,
client_info=client_info,
),
+ self.update_database: gapic_v1.method.wrap_method(
+ self.update_database,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
self.update_database_ddl: gapic_v1.method.wrap_method(
self.update_database_ddl,
default_retry=retries.Retry(
@@ -363,6 +388,121 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=3600.0,
client_info=client_info,
),
+ self.add_split_points: gapic_v1.method.wrap_method(
+ self.add_split_points,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: gapic_v1.method.wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup_schedule: gapic_v1.method.wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: gapic_v1.method.wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: gapic_v1.method.wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: gapic_v1.method.wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: gapic_v1.method.wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: gapic_v1.method.wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: gapic_v1.method.wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: gapic_v1.method.wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: gapic_v1.method.wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
}
def close(self):
@@ -411,6 +551,15 @@ def get_database(
]:
raise NotImplementedError()
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def update_database_ddl(
self,
@@ -568,6 +717,86 @@ def list_database_roles(
]:
raise NotImplementedError()
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Union[
+ spanner_database_admin.AddSplitPointsResponse,
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Union[
+ backup_schedule.BackupSchedule, Awaitable[backup_schedule.BackupSchedule]
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Union[
+ gsad_backup_schedule.BackupSchedule,
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest],
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Union[
+ backup_schedule.ListBackupSchedulesResponse,
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Union[
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
@property
def list_operations(
self,
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
index bdff991c79..0888d9af16 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import json
+import logging as std_logging
+import pickle
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
@@ -22,19 +25,99 @@
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = response.trailing_metadata()
+ # Convert the gRPC trailing-metadata object (a sequence of key/value pairs) to a dict of str -> str, or None if empty
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = response.result()
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response for {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": client_call_details.method,
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcTransport(DatabaseAdminTransport):
"""gRPC backend transport for DatabaseAdmin.
@@ -43,10 +126,10 @@ class DatabaseAdminGrpcTransport(DatabaseAdminTransport):
The Cloud Spanner Database Admin API can be used to:
- - create, drop, and list databases
- - update the schema of pre-existing databases
- - create, delete and list backups for a database
- - restore a database from an existing backup
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -62,14 +145,14 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
@@ -79,20 +162,24 @@ def __init__(
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ This argument is ignored if a ``channel`` instance is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
+ ignored if a ``channel`` instance is provided.
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
@@ -102,11 +189,11 @@ def __init__(
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for the grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -133,9 +220,10 @@ def __init__(
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
+ if isinstance(channel, grpc.Channel):
# Ignore credentials if a channel was passed.
- credentials = False
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
@@ -174,7 +262,9 @@ def __init__(
)
if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
self._host,
# use the credentials which are saved
credentials=self._credentials,
@@ -187,18 +277,24 @@ def __init__(
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
+ ("grpc.keepalive_time_ms", 120000),
],
)
- # Wrap messages. This must be done after self._grpc_channel exists
+ self._interceptor = _LoggingClientInterceptor()
+ self._logged_channel = grpc.intercept_channel(
+ self._grpc_channel, self._interceptor
+ )
+
+ # Wrap messages. This must be done after self._logged_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
@@ -211,9 +307,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -254,7 +351,9 @@ def operations_client(self) -> operations_v1.OperationsClient:
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+ self._operations_client = operations_v1.OperationsClient(
+ self._logged_channel
+ )
# Return the client from cache.
return self._operations_client
@@ -281,7 +380,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -318,7 +417,7 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -346,13 +445,78 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+ ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
@@ -382,7 +546,7 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -411,7 +575,7 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -443,7 +607,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -477,7 +641,7 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -512,7 +676,7 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -550,7 +714,7 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
@@ -588,7 +752,7 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -612,8 +776,8 @@ def copy_backup(
The [response][google.longrunning.Operation.response] field type
is [Backup][google.spanner.admin.database.v1.Backup], if
successful. Cancelling the returned operation will stop the
- copying and delete the backup. Concurrent CopyBackup requests
- can run on the same source backup.
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
Returns:
Callable[[~.CopyBackupRequest],
@@ -626,7 +790,7 @@ def copy_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "copy_backup" not in self._stubs:
- self._stubs["copy_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
request_serializer=backup.CopyBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -651,7 +815,7 @@ def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -678,7 +842,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -703,7 +867,7 @@ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empt
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -731,7 +895,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -777,7 +941,7 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -815,7 +979,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -854,7 +1018,7 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
@@ -883,15 +1047,219 @@ def list_database_roles(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_roles" not in self._stubs:
- self._stubs["list_database_roles"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
)
return self._stubs["list_database_roles"]
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ spanner_database_admin.AddSplitPointsResponse,
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ ~.AddSplitPointsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ gsad_backup_schedule.BackupSchedule,
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ ~.BackupSchedule]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ backup_schedule.ListBackupSchedulesResponse,
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ ~.ListBackupSchedulesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ spanner_database_admin.InternalUpdateGraphOperationResponse,
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ ~.InternalUpdateGraphOperationResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
def close(self):
- self.grpc_channel.close()
+ self._logged_channel.close()
@property
def delete_operation(
@@ -903,7 +1271,7 @@ def delete_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
- self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
@@ -920,7 +1288,7 @@ def cancel_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
- self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
@@ -937,7 +1305,7 @@ def get_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
- self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
@@ -956,7 +1324,7 @@ def list_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
- self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
index 40cb38cf28..145c6ebf03 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,29 +13,117 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import inspect
+import json
+import pickle
+import logging as std_logging
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry_async as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.protobuf.json_format import MessageToJson
+import google.protobuf.message
import grpc # type: ignore
+import proto # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatabaseAdminGrpcTransport
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
+
+class _LoggingClientAIOInterceptor(
+ grpc.aio.UnaryUnaryClientInterceptor
+): # pragma: NO COVER
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ )
+ if logging_enabled: # pragma: NO COVER
+ request_metadata = client_call_details.metadata
+ if isinstance(request, proto.Message):
+ request_payload = type(request).to_json(request)
+ elif isinstance(request, google.protobuf.message.Message):
+ request_payload = MessageToJson(request)
+ else:
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
+
+ request_metadata = {
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
+ for key, value in request_metadata
+ }
+ grpc_request = {
+ "payload": request_payload,
+ "requestMethod": "grpc",
+ "metadata": dict(request_metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for {client_call_details.method}",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "request": grpc_request,
+ "metadata": grpc_request["metadata"],
+ },
+ )
+ response = await continuation(client_call_details, request)
+ if logging_enabled: # pragma: NO COVER
+ response_metadata = await response.trailing_metadata()
+            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
+ metadata = (
+ dict([(k, str(v)) for k, v in response_metadata])
+ if response_metadata
+ else None
+ )
+ result = await response
+ if isinstance(result, proto.Message):
+ response_payload = type(result).to_json(result)
+ elif isinstance(result, google.protobuf.message.Message):
+ response_payload = MessageToJson(result)
+ else:
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
+ grpc_response = {
+ "payload": response_payload,
+ "metadata": metadata,
+ "status": "OK",
+ }
+ _LOGGER.debug(
+ f"Received response to rpc {client_call_details.method}.",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": str(client_call_details.method),
+ "response": grpc_response,
+ "metadata": grpc_response["metadata"],
+ },
+ )
+ return response
+
class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
"""gRPC AsyncIO backend transport for DatabaseAdmin.
@@ -44,10 +132,10 @@ class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
The Cloud Spanner Database Admin API can be used to:
- - create, drop, and list databases
- - update the schema of pre-existing databases
- - create, delete and list backups for a database
- - restore a database from an existing backup
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
@@ -64,7 +152,7 @@ class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
def create_channel(
cls,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
@@ -78,9 +166,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -107,15 +195,15 @@ def __init__(
self,
*,
host: str = "spanner.googleapis.com",
- credentials: ga_credentials.Credentials = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
+ api_mtls_endpoint: Optional[str] = None,
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
@@ -124,21 +212,25 @@ def __init__(
Args:
host (Optional[str]):
- The hostname to connect to.
+ The hostname to connect to (default: 'spanner.googleapis.com').
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ This argument is ignored if a ``channel`` instance is provided.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
- channel (Optional[aio.Channel]): A ``Channel`` instance through
- which to make calls.
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
+ A ``Channel`` instance through which to make calls, or a Callable
+ that constructs and returns one. If set to None, ``self.create_channel``
+ is used to create the channel. If a Callable is given, it will be called
+ with the same arguments as used in ``self.create_channel``.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
@@ -148,11 +240,11 @@ def __init__(
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for the grpc channel. It is ignored if ``channel`` is provided.
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -179,9 +271,10 @@ def __init__(
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
- if channel:
+ if isinstance(channel, aio.Channel):
# Ignore credentials if a channel was passed.
- credentials = False
+ credentials = None
+ self._ignore_credentials = True
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
@@ -219,7 +312,9 @@ def __init__(
)
if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
+ # initialize with the provided callable or the default channel
+ channel_init = channel or type(self).create_channel
+ self._grpc_channel = channel_init(
self._host,
# use the credentials which are saved
credentials=self._credentials,
@@ -232,10 +327,17 @@ def __init__(
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
+ ("grpc.keepalive_time_ms", 120000),
],
)
- # Wrap messages. This must be done after self._grpc_channel exists
+ self._interceptor = _LoggingClientAIOInterceptor()
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
+ self._logged_channel = self._grpc_channel
+ self._wrap_with_kind = (
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
+ )
+ # Wrap messages. This must be done after self._logged_channel exists
self._prep_wrapped_messages(client_info)
@property
@@ -258,7 +360,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
- self.grpc_channel
+ self._logged_channel
)
# Return the client from cache.
@@ -286,7 +388,7 @@ def list_databases(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
- self._stubs["list_databases"] = self.grpc_channel.unary_unary(
+ self._stubs["list_databases"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
@@ -324,7 +426,7 @@ def create_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
- self._stubs["create_database"] = self.grpc_channel.unary_unary(
+ self._stubs["create_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -353,13 +455,79 @@ def get_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
- self._stubs["get_database"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
+ @property
+ def update_database(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.UpdateDatabaseRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the update database method over gRPC.
+
+ Updates a Cloud Spanner database. The returned [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of updating the database. If the named database
+ does not exist, returns ``NOT_FOUND``.
+
+ While the operation is pending:
+
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field is set to true.
+ - Cancelling the operation is best-effort. If the cancellation
+ succeeds, the operation metadata's
+ [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ is set, the updates are reverted, and the operation terminates
+ with a ``CANCELLED`` status.
+ - New UpdateDatabase requests will return a
+ ``FAILED_PRECONDITION`` error until the pending operation is
+ done (returns successfully or with error).
+ - Reading the database via the API continues to give the
+ pre-request values.
+
+ Upon completion of the returned operation:
+
+ - The new values are in effect and readable via the API.
+ - The database's
+ [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ field becomes false.
+
+ The returned [long-running
+ operation][google.longrunning.Operation] will have a name of the
+ format
+        ``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``
+ and can be used to track the database modification. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Database][google.spanner.admin.database.v1.Database], if
+ successful.
+
+ Returns:
+ Callable[[~.UpdateDatabaseRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_database" not in self._stubs:
+ self._stubs["update_database"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_database"]
+
@property
def update_database_ddl(
self,
@@ -390,7 +558,7 @@ def update_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
- self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["update_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -421,7 +589,7 @@ def drop_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
- self._stubs["drop_database"] = self.grpc_channel.unary_unary(
+ self._stubs["drop_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -453,7 +621,7 @@ def get_database_ddl(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
- self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
+ self._stubs["get_database_ddl"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
@@ -487,7 +655,7 @@ def set_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
- self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["set_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -522,7 +690,7 @@ def get_iam_policy(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
- self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+ self._stubs["get_iam_policy"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
@@ -560,7 +728,7 @@ def test_iam_permissions(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
- self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+ self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
@@ -600,7 +768,7 @@ def create_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
- self._stubs["create_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["create_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -624,8 +792,8 @@ def copy_backup(
The [response][google.longrunning.Operation.response] field type
is [Backup][google.spanner.admin.database.v1.Backup], if
successful. Cancelling the returned operation will stop the
- copying and delete the backup. Concurrent CopyBackup requests
- can run on the same source backup.
+ copying and delete the destination backup. Concurrent CopyBackup
+ requests can run on the same source backup.
Returns:
Callable[[~.CopyBackupRequest],
@@ -638,7 +806,7 @@ def copy_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "copy_backup" not in self._stubs:
- self._stubs["copy_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["copy_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
request_serializer=backup.CopyBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -665,7 +833,7 @@ def get_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
- self._stubs["get_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["get_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
@@ -692,7 +860,7 @@ def update_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
- self._stubs["update_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["update_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
@@ -719,7 +887,7 @@ def delete_backup(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
- self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_backup"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
@@ -747,7 +915,7 @@ def list_backups(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
- self._stubs["list_backups"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backups"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
@@ -794,7 +962,7 @@ def restore_database(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
- self._stubs["restore_database"] = self.grpc_channel.unary_unary(
+ self._stubs["restore_database"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
@@ -832,7 +1000,7 @@ def list_database_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
- self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
@@ -872,7 +1040,7 @@ def list_backup_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
- self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_backup_operations"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
@@ -901,15 +1069,591 @@ def list_database_roles(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_roles" not in self._stubs:
- self._stubs["list_database_roles"] = self.grpc_channel.unary_unary(
+ self._stubs["list_database_roles"] = self._logged_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize,
)
return self._stubs["list_database_roles"]
+ @property
+ def add_split_points(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.AddSplitPointsRequest],
+ Awaitable[spanner_database_admin.AddSplitPointsResponse],
+ ]:
+ r"""Return a callable for the add split points method over gRPC.
+
+ Adds split points to specified tables, indexes of a
+ database.
+
+ Returns:
+ Callable[[~.AddSplitPointsRequest],
+ Awaitable[~.AddSplitPointsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "add_split_points" not in self._stubs:
+ self._stubs["add_split_points"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints",
+ request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize,
+ response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize,
+ )
+ return self._stubs["add_split_points"]
+
+ @property
+ def create_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.CreateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the create backup schedule method over gRPC.
+
+ Creates a new backup schedule.
+
+ Returns:
+ Callable[[~.CreateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_backup_schedule" not in self._stubs:
+ self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["create_backup_schedule"]
+
+ @property
+ def get_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.GetBackupScheduleRequest],
+ Awaitable[backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the get backup schedule method over gRPC.
+
+ Gets backup schedule for the input schedule name.
+
+ Returns:
+ Callable[[~.GetBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_backup_schedule" not in self._stubs:
+ self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ request_serializer=backup_schedule.GetBackupScheduleRequest.serialize,
+ response_deserializer=backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["get_backup_schedule"]
+
+ @property
+ def update_backup_schedule(
+ self,
+ ) -> Callable[
+ [gsad_backup_schedule.UpdateBackupScheduleRequest],
+ Awaitable[gsad_backup_schedule.BackupSchedule],
+ ]:
+ r"""Return a callable for the update backup schedule method over gRPC.
+
+ Updates a backup schedule.
+
+ Returns:
+ Callable[[~.UpdateBackupScheduleRequest],
+ Awaitable[~.BackupSchedule]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_backup_schedule" not in self._stubs:
+ self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize,
+ response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize,
+ )
+ return self._stubs["update_backup_schedule"]
+
+ @property
+ def delete_backup_schedule(
+ self,
+ ) -> Callable[
+ [backup_schedule.DeleteBackupScheduleRequest], Awaitable[empty_pb2.Empty]
+ ]:
+ r"""Return a callable for the delete backup schedule method over gRPC.
+
+ Deletes a backup schedule.
+
+ Returns:
+ Callable[[~.DeleteBackupScheduleRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_backup_schedule" not in self._stubs:
+ self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_backup_schedule"]
+
+ @property
+ def list_backup_schedules(
+ self,
+ ) -> Callable[
+ [backup_schedule.ListBackupSchedulesRequest],
+ Awaitable[backup_schedule.ListBackupSchedulesResponse],
+ ]:
+ r"""Return a callable for the list backup schedules method over gRPC.
+
+ Lists all the backup schedules for the database.
+
+ Returns:
+ Callable[[~.ListBackupSchedulesRequest],
+ Awaitable[~.ListBackupSchedulesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_backup_schedules" not in self._stubs:
+ self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize,
+ response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize,
+ )
+ return self._stubs["list_backup_schedules"]
+
+ @property
+ def internal_update_graph_operation(
+ self,
+ ) -> Callable[
+ [spanner_database_admin.InternalUpdateGraphOperationRequest],
+ Awaitable[spanner_database_admin.InternalUpdateGraphOperationResponse],
+ ]:
+ r"""Return a callable for the internal update graph
+ operation method over gRPC.
+
+ This is an internal API called by Spanner Graph jobs.
+ You should never need to call this API directly.
+
+ Returns:
+ Callable[[~.InternalUpdateGraphOperationRequest],
+ Awaitable[~.InternalUpdateGraphOperationResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "internal_update_graph_operation" not in self._stubs:
+ self._stubs[
+ "internal_update_graph_operation"
+ ] = self._logged_channel.unary_unary(
+ "/google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation",
+ request_serializer=spanner_database_admin.InternalUpdateGraphOperationRequest.serialize,
+ response_deserializer=spanner_database_admin.InternalUpdateGraphOperationResponse.deserialize,
+ )
+ return self._stubs["internal_update_graph_operation"]
+
+ def _prep_wrapped_messages(self, client_info):
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
+ self._wrapped_methods = {
+ self.list_databases: self._wrap_method(
+ self.list_databases,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_database: self._wrap_method(
+ self.create_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database: self._wrap_method(
+ self.get_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database: self._wrap_method(
+ self.update_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_database_ddl: self._wrap_method(
+ self.update_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.drop_database: self._wrap_method(
+ self.drop_database,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_database_ddl: self._wrap_method(
+ self.get_database_ddl,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.set_iam_policy: self._wrap_method(
+ self.set_iam_policy,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.get_iam_policy: self._wrap_method(
+ self.get_iam_policy,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=30.0,
+ ),
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.test_iam_permissions: self._wrap_method(
+ self.test_iam_permissions,
+ default_timeout=30.0,
+ client_info=client_info,
+ ),
+ self.create_backup: self._wrap_method(
+ self.create_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.copy_backup: self._wrap_method(
+ self.copy_backup,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup: self._wrap_method(
+ self.get_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup: self._wrap_method(
+ self.update_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup: self._wrap_method(
+ self.delete_backup,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backups: self._wrap_method(
+ self.list_backups,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.restore_database: self._wrap_method(
+ self.restore_database,
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_operations: self._wrap_method(
+ self.list_database_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_operations: self._wrap_method(
+ self.list_backup_operations,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_database_roles: self._wrap_method(
+ self.list_database_roles,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.add_split_points: self._wrap_method(
+ self.add_split_points,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.create_backup_schedule: self._wrap_method(
+ self.create_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.get_backup_schedule: self._wrap_method(
+ self.get_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.update_backup_schedule: self._wrap_method(
+ self.update_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.delete_backup_schedule: self._wrap_method(
+ self.delete_backup_schedule,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.list_backup_schedules: self._wrap_method(
+ self.list_backup_schedules,
+ default_retry=retries.AsyncRetry(
+ initial=1.0,
+ maximum=32.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=3600.0,
+ ),
+ default_timeout=3600.0,
+ client_info=client_info,
+ ),
+ self.internal_update_graph_operation: self._wrap_method(
+ self.internal_update_graph_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_operation: self._wrap_method(
+ self.cancel_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_operation: self._wrap_method(
+ self.delete_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_operation: self._wrap_method(
+ self.get_operation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_operations: self._wrap_method(
+ self.list_operations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ def _wrap_method(self, func, *args, **kwargs):
+ if self._wrap_with_kind: # pragma: NO COVER
+ kwargs["kind"] = self.kind
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
+
def close(self):
- return self.grpc_channel.close()
+ return self._logged_channel.close()
+
+ @property
+ def kind(self) -> str:
+ return "grpc_asyncio"
@property
def delete_operation(
@@ -921,7 +1665,7 @@ def delete_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
- self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["delete_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
@@ -938,7 +1682,7 @@ def cancel_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
- self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
@@ -955,7 +1699,7 @@ def get_operation(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
- self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
@@ -974,7 +1718,7 @@ def list_operations(
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
- self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
new file mode 100644
index 0000000000..dfec442041
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py
@@ -0,0 +1,6551 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import json # type: ignore
+
+from google.auth.transport.requests import AuthorizedSession # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+from google.api_core import rest_helpers
+from google.api_core import rest_streaming
+from google.api_core import gapic_v1
+import google.protobuf
+
+from google.protobuf import json_format
+from google.api_core import operations_v1
+
+from requests import __version__ as requests_version
+import dataclasses
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+import warnings
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+from .rest_base import _BaseDatabaseAdminRestTransport
+from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = logging.getLogger(__name__)
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
+ grpc_version=None,
+ rest_version=f"requests@{requests_version}",
+)
+
+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
+ DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
+
+
+class DatabaseAdminRestInterceptor:
+ """Interceptor for DatabaseAdmin.
+
+ Interceptors are used to manipulate requests, request metadata, and responses
+ in arbitrary ways.
+ Example use cases include:
+ * Logging
+ * Verifying requests according to service or custom semantics
+ * Stripping extraneous information from responses
+
+ These use cases and more can be enabled by injecting an
+ instance of a custom subclass when constructing the DatabaseAdminRestTransport.
+
+ .. code-block:: python
+ class MyCustomDatabaseAdminInterceptor(DatabaseAdminRestInterceptor):
+ def pre_add_split_points(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_add_split_points(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_copy_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_copy_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_create_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_delete_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_delete_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_drop_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def pre_get_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_database_ddl(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_database_ddl(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_get_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_internal_update_graph_operation(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_internal_update_graph_operation(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backups(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backups(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_backup_schedules(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_backup_schedules(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_operations(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_operations(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_database_roles(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_database_roles(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_list_databases(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_databases(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_restore_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_restore_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_set_iam_policy(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_set_iam_policy(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_test_iam_permissions(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_test_iam_permissions(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_backup_schedule(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_backup_schedule(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_database(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ def pre_update_database_ddl(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_database_ddl(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
+ transport = DatabaseAdminRestTransport(interceptor=MyCustomDatabaseAdminInterceptor())
+ client = DatabaseAdminClient(transport=transport)
+
+
+ """
+
+ def pre_add_split_points(
+ self,
+ request: spanner_database_admin.AddSplitPointsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for add_split_points
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_add_split_points(
+ self, response: spanner_database_admin.AddSplitPointsResponse
+ ) -> spanner_database_admin.AddSplitPointsResponse:
+ """Post-rpc interceptor for add_split_points
+
+ DEPRECATED. Please use the `post_add_split_points_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_add_split_points` interceptor runs
+ before the `post_add_split_points_with_metadata` interceptor.
+ """
+ return response
+
+ def post_add_split_points_with_metadata(
+ self,
+ response: spanner_database_admin.AddSplitPointsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.AddSplitPointsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for add_split_points
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_add_split_points_with_metadata`
+ interceptor in new development instead of the `post_add_split_points` interceptor.
+ When both interceptors are used, this `post_add_split_points_with_metadata` interceptor runs after the
+ `post_add_split_points` interceptor. The (possibly modified) response returned by
+ `post_add_split_points` will be passed to
+ `post_add_split_points_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_copy_backup(
+ self,
+ request: backup.CopyBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for copy_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_copy_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for copy_backup
+
+ DEPRECATED. Please use the `post_copy_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_copy_backup` interceptor runs
+ before the `post_copy_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_copy_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for copy_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_copy_backup_with_metadata`
+ interceptor in new development instead of the `post_copy_backup` interceptor.
+ When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the
+ `post_copy_backup` interceptor. The (possibly modified) response returned by
+ `post_copy_backup` will be passed to
+ `post_copy_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup(
+ self,
+ request: gsad_backup.CreateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.CreateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for create_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_backup
+
+ DEPRECATED. Please use the `post_create_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup` interceptor runs
+ before the `post_create_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_with_metadata`
+ interceptor in new development instead of the `post_create_backup` interceptor.
+ When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the
+ `post_create_backup` interceptor. The (possibly modified) response returned by
+ `post_create_backup` will be passed to
+ `post_create_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_backup_schedule(
+ self,
+ request: gsad_backup_schedule.CreateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.CreateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for create_backup_schedule
+
+ DEPRECATED. Please use the `post_create_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_backup_schedule` interceptor runs
+ before the `post_create_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for create_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_create_backup_schedule` interceptor.
+ When both interceptors are used, this `post_create_backup_schedule_with_metadata` interceptor runs after the
+ `post_create_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_create_backup_schedule` will be passed to
+ `post_create_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_create_database(
+ self,
+ request: spanner_database_admin.CreateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.CreateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_create_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_database
+
+ DEPRECATED. Please use the `post_create_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_create_database` interceptor runs
+ before the `post_create_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_database_with_metadata`
+ interceptor in new development instead of the `post_create_database` interceptor.
+ When both interceptors are used, this `post_create_database_with_metadata` interceptor runs after the
+ `post_create_database` interceptor. The (possibly modified) response returned by
+ `post_create_database` will be passed to
+ `post_create_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_delete_backup(
+ self,
+ request: backup.DeleteBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.DeleteBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for delete_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_delete_backup_schedule(
+ self,
+ request: backup_schedule.DeleteBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.DeleteBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for delete_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_drop_database(
+ self,
+ request: spanner_database_admin.DropDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.DropDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for drop_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def pre_get_backup(
+ self,
+ request: backup.GetBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for get_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup(self, response: backup.Backup) -> backup.Backup:
+ """Post-rpc interceptor for get_backup
+
+ DEPRECATED. Please use the `post_get_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup` interceptor runs
+ before the `post_get_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_with_metadata(
+ self, response: backup.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]]
+ ) -> Tuple[backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_with_metadata`
+ interceptor in new development instead of the `post_get_backup` interceptor.
+ When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the
+ `post_get_backup` interceptor. The (possibly modified) response returned by
+ `post_get_backup` will be passed to
+ `post_get_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_backup_schedule(
+ self,
+ request: backup_schedule.GetBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.GetBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_backup_schedule(
+ self, response: backup_schedule.BackupSchedule
+ ) -> backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for get_backup_schedule
+
+ DEPRECATED. Please use the `post_get_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_backup_schedule` interceptor runs
+ before the `post_get_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_backup_schedule_with_metadata(
+ self,
+ response: backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_get_backup_schedule` interceptor.
+ When both interceptors are used, this `post_get_backup_schedule_with_metadata` interceptor runs after the
+ `post_get_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_get_backup_schedule` will be passed to
+ `post_get_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database(
+ self, response: spanner_database_admin.Database
+ ) -> spanner_database_admin.Database:
+ """Post-rpc interceptor for get_database
+
+ DEPRECATED. Please use the `post_get_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database` interceptor runs
+ before the `post_get_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_with_metadata(
+ self,
+ response: spanner_database_admin.Database,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.Database, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for get_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_with_metadata`
+ interceptor in new development instead of the `post_get_database` interceptor.
+ When both interceptors are used, this `post_get_database_with_metadata` interceptor runs after the
+ `post_get_database` interceptor. The (possibly modified) response returned by
+ `post_get_database` will be passed to
+ `post_get_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_database_ddl(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_database_ddl(
+ self, response: spanner_database_admin.GetDatabaseDdlResponse
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ """Post-rpc interceptor for get_database_ddl
+
+ DEPRECATED. Please use the `post_get_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_database_ddl` interceptor runs
+ before the `post_get_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_database_ddl_with_metadata(
+ self,
+ response: spanner_database_admin.GetDatabaseDdlResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.GetDatabaseDdlResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for get_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_get_database_ddl` interceptor.
+ When both interceptors are used, this `post_get_database_ddl_with_metadata` interceptor runs after the
+ `post_get_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_get_database_ddl` will be passed to
+ `post_get_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_get_iam_policy(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for get_iam_policy
+
+ DEPRECATED. Please use the `post_get_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_get_iam_policy` interceptor runs
+ before the `post_get_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_get_iam_policy` interceptor.
+ When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the
+ `post_get_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_get_iam_policy` will be passed to
+ `post_get_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_operations(
+ self,
+ request: backup.ListBackupOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_backup_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_operations(
+ self, response: backup.ListBackupOperationsResponse
+ ) -> backup.ListBackupOperationsResponse:
+ """Post-rpc interceptor for list_backup_operations
+
+ DEPRECATED. Please use the `post_list_backup_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_operations` interceptor runs
+ before the `post_list_backup_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_operations_with_metadata(
+ self,
+ response: backup.ListBackupOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup.ListBackupOperationsResponse, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for list_backup_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_operations_with_metadata`
+ interceptor in new development instead of the `post_list_backup_operations` interceptor.
+ When both interceptors are used, this `post_list_backup_operations_with_metadata` interceptor runs after the
+ `post_list_backup_operations` interceptor. The (possibly modified) response returned by
+ `post_list_backup_operations` will be passed to
+ `post_list_backup_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backups(
+ self,
+ request: backup.ListBackupsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Pre-rpc interceptor for list_backups
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backups(
+ self, response: backup.ListBackupsResponse
+ ) -> backup.ListBackupsResponse:
+ """Post-rpc interceptor for list_backups
+
+ DEPRECATED. Please use the `post_list_backups_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backups` interceptor runs
+ before the `post_list_backups_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backups_with_metadata(
+ self,
+ response: backup.ListBackupsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[backup.ListBackupsResponse, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for list_backups
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backups_with_metadata`
+ interceptor in new development instead of the `post_list_backups` interceptor.
+ When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the
+ `post_list_backups` interceptor. The (possibly modified) response returned by
+ `post_list_backups` will be passed to
+ `post_list_backups_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_backup_schedules(
+ self,
+ request: backup_schedule.ListBackupSchedulesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_backup_schedules(
+ self, response: backup_schedule.ListBackupSchedulesResponse
+ ) -> backup_schedule.ListBackupSchedulesResponse:
+ """Post-rpc interceptor for list_backup_schedules
+
+ DEPRECATED. Please use the `post_list_backup_schedules_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_backup_schedules` interceptor runs
+ before the `post_list_backup_schedules_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_backup_schedules_with_metadata(
+ self,
+ response: backup_schedule.ListBackupSchedulesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ backup_schedule.ListBackupSchedulesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_backup_schedules
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_backup_schedules_with_metadata`
+ interceptor in new development instead of the `post_list_backup_schedules` interceptor.
+ When both interceptors are used, this `post_list_backup_schedules_with_metadata` interceptor runs after the
+ `post_list_backup_schedules` interceptor. The (possibly modified) response returned by
+ `post_list_backup_schedules` will be passed to
+ `post_list_backup_schedules_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_operations(
+ self,
+ request: spanner_database_admin.ListDatabaseOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_operations(
+ self, response: spanner_database_admin.ListDatabaseOperationsResponse
+ ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+ """Post-rpc interceptor for list_database_operations
+
+ DEPRECATED. Please use the `post_list_database_operations_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_operations` interceptor runs
+ before the `post_list_database_operations_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_operations_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseOperationsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseOperationsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_operations
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_operations_with_metadata`
+ interceptor in new development instead of the `post_list_database_operations` interceptor.
+ When both interceptors are used, this `post_list_database_operations_with_metadata` interceptor runs after the
+ `post_list_database_operations` interceptor. The (possibly modified) response returned by
+ `post_list_database_operations` will be passed to
+ `post_list_database_operations_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_database_roles(
+ self,
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_database_roles
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_database_roles(
+ self, response: spanner_database_admin.ListDatabaseRolesResponse
+ ) -> spanner_database_admin.ListDatabaseRolesResponse:
+ """Post-rpc interceptor for list_database_roles
+
+ DEPRECATED. Please use the `post_list_database_roles_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_database_roles` interceptor runs
+ before the `post_list_database_roles_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_database_roles_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabaseRolesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabaseRolesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_database_roles
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_database_roles_with_metadata`
+ interceptor in new development instead of the `post_list_database_roles` interceptor.
+ When both interceptors are used, this `post_list_database_roles_with_metadata` interceptor runs after the
+ `post_list_database_roles` interceptor. The (possibly modified) response returned by
+ `post_list_database_roles` will be passed to
+ `post_list_database_roles_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_list_databases(
+ self,
+ request: spanner_database_admin.ListDatabasesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_databases
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_databases(
+ self, response: spanner_database_admin.ListDatabasesResponse
+ ) -> spanner_database_admin.ListDatabasesResponse:
+ """Post-rpc interceptor for list_databases
+
+ DEPRECATED. Please use the `post_list_databases_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_list_databases` interceptor runs
+ before the `post_list_databases_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_databases_with_metadata(
+ self,
+ response: spanner_database_admin.ListDatabasesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.ListDatabasesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_databases
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_databases_with_metadata`
+ interceptor in new development instead of the `post_list_databases` interceptor.
+ When both interceptors are used, this `post_list_databases_with_metadata` interceptor runs after the
+ `post_list_databases` interceptor. The (possibly modified) response returned by
+ `post_list_databases` will be passed to
+ `post_list_databases_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_restore_database(
+ self,
+ request: spanner_database_admin.RestoreDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.RestoreDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for restore_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_restore_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for restore_database
+
+ DEPRECATED. Please use the `post_restore_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_restore_database` interceptor runs
+ before the `post_restore_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_restore_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for restore_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_restore_database_with_metadata`
+ interceptor in new development instead of the `post_restore_database` interceptor.
+ When both interceptors are used, this `post_restore_database_with_metadata` interceptor runs after the
+ `post_restore_database` interceptor. The (possibly modified) response returned by
+ `post_restore_database` will be passed to
+ `post_restore_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_set_iam_policy(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for set_iam_policy
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:
+ """Post-rpc interceptor for set_iam_policy
+
+ DEPRECATED. Please use the `post_set_iam_policy_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_set_iam_policy` interceptor runs
+ before the `post_set_iam_policy_with_metadata` interceptor.
+ """
+ return response
+
+ def post_set_iam_policy_with_metadata(
+ self,
+ response: policy_pb2.Policy,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for set_iam_policy
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_set_iam_policy_with_metadata`
+ interceptor in new development instead of the `post_set_iam_policy` interceptor.
+ When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the
+ `post_set_iam_policy` interceptor. The (possibly modified) response returned by
+ `post_set_iam_policy` will be passed to
+ `post_set_iam_policy_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_test_iam_permissions(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_test_iam_permissions(
+ self, response: iam_policy_pb2.TestIamPermissionsResponse
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ """Post-rpc interceptor for test_iam_permissions
+
+ DEPRECATED. Please use the `post_test_iam_permissions_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_test_iam_permissions` interceptor runs
+ before the `post_test_iam_permissions_with_metadata` interceptor.
+ """
+ return response
+
+ def post_test_iam_permissions_with_metadata(
+ self,
+ response: iam_policy_pb2.TestIamPermissionsResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ iam_policy_pb2.TestIamPermissionsResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for test_iam_permissions
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_test_iam_permissions_with_metadata`
+ interceptor in new development instead of the `post_test_iam_permissions` interceptor.
+ When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the
+ `post_test_iam_permissions` interceptor. The (possibly modified) response returned by
+ `post_test_iam_permissions` will be passed to
+ `post_test_iam_permissions_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup.UpdateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for update_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup(self, response: gsad_backup.Backup) -> gsad_backup.Backup:
+ """Post-rpc interceptor for update_backup
+
+ DEPRECATED. Please use the `post_update_backup_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup` interceptor runs
+ before the `post_update_backup_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_with_metadata(
+ self,
+ response: gsad_backup.Backup,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[gsad_backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_backup
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_with_metadata`
+ interceptor in new development instead of the `post_update_backup` interceptor.
+ When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the
+ `post_update_backup` interceptor. The (possibly modified) response returned by
+ `post_update_backup` will be passed to
+ `post_update_backup_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_backup_schedule(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.UpdateBackupScheduleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_backup_schedule(
+ self, response: gsad_backup_schedule.BackupSchedule
+ ) -> gsad_backup_schedule.BackupSchedule:
+ """Post-rpc interceptor for update_backup_schedule
+
+ DEPRECATED. Please use the `post_update_backup_schedule_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_backup_schedule` interceptor runs
+ before the `post_update_backup_schedule_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_backup_schedule_with_metadata(
+ self,
+ response: gsad_backup_schedule.BackupSchedule,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Post-rpc interceptor for update_backup_schedule
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_backup_schedule_with_metadata`
+ interceptor in new development instead of the `post_update_backup_schedule` interceptor.
+ When both interceptors are used, this `post_update_backup_schedule_with_metadata` interceptor runs after the
+ `post_update_backup_schedule` interceptor. The (possibly modified) response returned by
+ `post_update_backup_schedule` will be passed to
+ `post_update_backup_schedule_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database(
+ self,
+ request: spanner_database_admin.UpdateDatabaseRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database
+
+ DEPRECATED. Please use the `post_update_database_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database` interceptor runs
+ before the `post_update_database_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_with_metadata`
+ interceptor in new development instead of the `post_update_database` interceptor.
+ When both interceptors are used, this `post_update_database_with_metadata` interceptor runs after the
+ `post_update_database` interceptor. The (possibly modified) response returned by
+ `post_update_database` will be passed to
+ `post_update_database_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_update_database_ddl(
+ self,
+ request: spanner_database_admin.UpdateDatabaseDdlRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ spanner_database_admin.UpdateDatabaseDdlRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_database_ddl
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_update_database_ddl(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_database_ddl
+
+ DEPRECATED. Please use the `post_update_database_ddl_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code. This `post_update_database_ddl` interceptor runs
+ before the `post_update_database_ddl_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_database_ddl_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_database_ddl
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the DatabaseAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_database_ddl_with_metadata`
+ interceptor in new development instead of the `post_update_database_ddl` interceptor.
+ When both interceptors are used, this `post_update_database_ddl_with_metadata` interceptor runs after the
+ `post_update_database_ddl` interceptor. The (possibly modified) response returned by
+ `post_update_database_ddl` will be passed to
+ `post_update_database_ddl_with_metadata`.
+ """
+ return response, metadata
+
+ def pre_cancel_operation(
+ self,
+ request: operations_pb2.CancelOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_cancel_operation(self, response: None) -> None:
+ """Post-rpc interceptor for cancel_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_delete_operation(
+ self,
+ request: operations_pb2.DeleteOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_delete_operation(self, response: None) -> None:
+ """Post-rpc interceptor for delete_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_get_operation(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_get_operation(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for get_operation
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+ def pre_list_operations(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
+ ]:
+ """Pre-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the DatabaseAdmin server.
+ """
+ return request, metadata
+
+ def post_list_operations(
+ self, response: operations_pb2.ListOperationsResponse
+ ) -> operations_pb2.ListOperationsResponse:
+ """Post-rpc interceptor for list_operations
+
+ Override in a subclass to manipulate the response
+ after it is returned by the DatabaseAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
+
+@dataclasses.dataclass
+class DatabaseAdminRestStub:
+ _session: AuthorizedSession
+ _host: str
+ _interceptor: DatabaseAdminRestInterceptor
+
+
+class DatabaseAdminRestTransport(_BaseDatabaseAdminRestTransport):
+ """REST backend synchronous transport for DatabaseAdmin.
+
+ Cloud Spanner Database Admin API
+
+ The Cloud Spanner Database Admin API can be used to:
+
+ - create, drop, and list databases
+ - update the schema of pre-existing databases
+ - create, delete, copy and list backups for a database
+ - restore a database from an existing backup
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "spanner.googleapis.com",
+ credentials: Optional[ga_credentials.Credentials] = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ url_scheme: str = "https",
+ interceptor: Optional[DatabaseAdminRestInterceptor] = None,
+ api_audience: Optional[str] = None,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+ certificate to configure mutual TLS HTTP channel. It is ignored
+ if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ """
+ # Run the base constructor
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+ # credentials object
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ url_scheme=url_scheme,
+ api_audience=api_audience,
+ )
+ self._session = AuthorizedSession(
+ self._credentials, default_host=self.DEFAULT_HOST
+ )
+ self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+ if client_cert_source_for_mtls:
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
+ self._interceptor = interceptor or DatabaseAdminRestInterceptor()
+ self._prep_wrapped_messages(client_info)
+
+ @property
def operations_client(self) -> operations_v1.AbstractOperationsClient:
    """Create the client designed to process long-running operations.

    This property caches on the instance; repeated calls return the same
    client.
    """
    # NOTE(review): in generated GAPIC transports this accessor is normally
    # decorated with @property — the decorator lies above this view; confirm.
    # Only create a new client if we do not already have one.
    if self._operations_client is None:
        # Static routing table mapping each google.longrunning.Operations
        # RPC to the Spanner-specific REST paths it may be served on
        # (database, instance, backup and instanceConfig operations).
        http_options: Dict[str, List[Dict[str, str]]] = {
            "google.longrunning.Operations.CancelOperation": [
                {
                    "method": "post",
                    "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
                },
                {
                    "method": "post",
                    "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
                },
                {
                    "method": "post",
                    "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
                },
                {
                    "method": "post",
                    "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
                },
            ],
            "google.longrunning.Operations.DeleteOperation": [
                {
                    "method": "delete",
                    "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
                },
                {
                    "method": "delete",
                    "uri": "/v1/{name=projects/*/instances/*/operations/*}",
                },
                {
                    "method": "delete",
                    "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
                },
                {
                    "method": "delete",
                    "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
                },
            ],
            "google.longrunning.Operations.GetOperation": [
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
                },
            ],
            "google.longrunning.Operations.ListOperations": [
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/operations}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
                },
            ],
        }

        # Build a REST transport that reuses this transport's host,
        # credentials and scopes so LRO polling shares the same identity.
        rest_transport = operations_v1.OperationsRestTransport(
            host=self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            scopes=self._scopes,
            http_options=http_options,
            path_prefix="v1",
        )

        self._operations_client = operations_v1.AbstractOperationsClient(
            transport=rest_transport
        )

    # Return the client from cache.
    return self._operations_client
+
class _AddSplitPoints(
    _BaseDatabaseAdminRestTransport._BaseAddSplitPoints, DatabaseAdminRestStub
):
    """REST stub that executes the AddSplitPoints RPC over HTTP/JSON."""

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.AddSplitPoints")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.post) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: spanner_database_admin.AddSplitPointsRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> spanner_database_admin.AddSplitPointsResponse:
        r"""Call the add split points method over HTTP.

        Args:
            request (~.spanner_database_admin.AddSplitPointsRequest):
                The request object. The request for
                [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.spanner_database_admin.AddSplitPointsResponse:
                The response for
                [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_add_split_points(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.AddSplitPoints",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "AddSplitPoints",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._AddSplitPoints._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = spanner_database_admin.AddSplitPointsResponse()
        pb_resp = spanner_database_admin.AddSplitPointsResponse.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_add_split_points(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_add_split_points_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fix: serialize the parsed proto-plus message `resp`, not the
                # raw HTTP `response` object — the latter always raised and
                # silently logged a None payload.
                response_payload = (
                    spanner_database_admin.AddSplitPointsResponse.to_json(resp)
                )
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.add_split_points",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "AddSplitPoints",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CopyBackup(
    _BaseDatabaseAdminRestTransport._BaseCopyBackup, DatabaseAdminRestStub
):
    """REST stub that executes the CopyBackup RPC over HTTP/JSON."""

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.CopyBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.post) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: backup.CopyBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the copy backup method over HTTP.

        Args:
            request (~.backup.CopyBackupRequest):
                The request object. The request for
                [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a
                long-running operation that is the
                result of a network API call.

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_copy_backup(request, metadata)
        transcoded_request = (
            _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_transcoded_request(
                http_options, request
            )
        )

        body = (
            _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_request_body_json(
                transcoded_request
            )
        )

        # Jsonify the query params
        query_params = (
            _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_query_params_json(
                transcoded_request
            )
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CopyBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CopyBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CopyBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = operations_pb2.Operation()
        json_format.Parse(response.content, resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_copy_backup(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_copy_backup_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                response_payload = json_format.MessageToJson(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.copy_backup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CopyBackup",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CreateBackup(
    _BaseDatabaseAdminRestTransport._BaseCreateBackup, DatabaseAdminRestStub
):
    """REST stub that executes the CreateBackup RPC over HTTP/JSON."""

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.CreateBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.post) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: gsad_backup.CreateBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the create backup method over HTTP.

        Args:
            request (~.gsad_backup.CreateBackupRequest):
                The request object. The request for
                [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a
                long-running operation that is the
                result of a network API call.

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_create_backup(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = operations_pb2.Operation()
        json_format.Parse(response.content, resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_backup(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_backup_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                response_payload = json_format.MessageToJson(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackup",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CreateBackupSchedule(
    _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule, DatabaseAdminRestStub
):
    """REST stub that executes the CreateBackupSchedule RPC over HTTP/JSON."""

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.CreateBackupSchedule")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.post) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: gsad_backup_schedule.CreateBackupScheduleRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> gsad_backup_schedule.BackupSchedule:
        r"""Call the create backup schedule method over HTTP.

        Args:
            request (~.gsad_backup_schedule.CreateBackupScheduleRequest):
                The request object. The request for
                [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.gsad_backup_schedule.BackupSchedule:
                BackupSchedule expresses the
                automated backup creation specification
                for a Spanner database. Next ID: 10

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_create_backup_schedule(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackupSchedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackupSchedule",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateBackupSchedule._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = gsad_backup_schedule.BackupSchedule()
        pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)

        json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_backup_schedule(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_backup_schedule_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                # Fix: serialize the parsed proto-plus message `resp`, not the
                # raw HTTP `response` object — the latter always raised and
                # silently logged a None payload.
                response_payload = gsad_backup_schedule.BackupSchedule.to_json(
                    resp
                )
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup_schedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateBackupSchedule",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _CreateDatabase(
    _BaseDatabaseAdminRestTransport._BaseCreateDatabase, DatabaseAdminRestStub
):
    """REST stub that executes the CreateDatabase RPC over HTTP/JSON."""

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.CreateDatabase")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.post) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
            data=body,
        )
        return response

    def __call__(
        self,
        request: spanner_database_admin.CreateDatabaseRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the create database method over HTTP.

        Args:
            request (~.spanner_database_admin.CreateDatabaseRequest):
                The request object. The request for
                [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a
                long-running operation that is the
                result of a network API call.

        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_create_database(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_transcoded_request(
            http_options, request
        )

        body = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_request_body_json(
            transcoded_request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateDatabase",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateDatabase",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._CreateDatabase._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
            body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        resp = operations_pb2.Operation()
        json_format.Parse(response.content, resp, ignore_unknown_fields=True)

        resp = self._interceptor.post_create_database(resp)
        response_metadata = [(k, str(v)) for k, v in response.headers.items()]
        resp, _ = self._interceptor.post_create_database_with_metadata(
            resp, response_metadata
        )
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                response_payload = json_format.MessageToJson(resp)
            except Exception:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_database",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "CreateDatabase",
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
        return resp
+
class _DeleteBackup(
    _BaseDatabaseAdminRestTransport._BaseDeleteBackup, DatabaseAdminRestStub
):
    """REST stub that executes the DeleteBackup RPC over HTTP/JSON.

    The RPC returns Empty, so __call__ returns None on success and raises
    on any HTTP error status.
    """

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.DeleteBackup")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.delete) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup.DeleteBackupRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the delete backup method over HTTP.

        Args:
            request (~.backup.DeleteBackupRequest):
                The request object. The request for
                [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_delete_backup(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackup",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DeleteBackup",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DeleteBackup._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
class _DeleteBackupSchedule(
    _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule, DatabaseAdminRestStub
):
    """REST stub that executes the DeleteBackupSchedule RPC over HTTP/JSON.

    The RPC returns Empty, so __call__ returns None on success and raises
    on any HTTP error status.
    """

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.DeleteBackupSchedule")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.delete) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: backup_schedule.DeleteBackupScheduleRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the delete backup schedule method over HTTP.

        Args:
            request (~.backup_schedule.DeleteBackupScheduleRequest):
                The request object. The request for
                [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_delete_backup_schedule(
            request, metadata
        )
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackupSchedule",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DeleteBackupSchedule",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DeleteBackupSchedule._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
class _DropDatabase(
    _BaseDatabaseAdminRestTransport._BaseDropDatabase, DatabaseAdminRestStub
):
    """REST stub that executes the DropDatabase RPC over HTTP/JSON.

    The RPC returns Empty, so __call__ returns None on success and raises
    on any HTTP error status.
    """

    def __hash__(self):
        return hash("DatabaseAdminRestTransport.DropDatabase")

    @staticmethod
    def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch to the session method named by the HTTP verb
        # (e.g. session.delete) and issue the transcoded request.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        response = getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    def __call__(
        self,
        request: spanner_database_admin.DropDatabaseRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ):
        r"""Call the drop database method over HTTP.

        Args:
            request (~.spanner_database_admin.DropDatabaseRequest):
                The request object. The request for
                [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.
        """

        http_options = (
            _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_http_options()
        )

        # Let the interceptor see (and possibly rewrite) the request first.
        request, metadata = self._interceptor.pre_drop_database(request, metadata)
        transcoded_request = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                # Fix: `request` is a proto-plus message, so serialize via its
                # own to_json (json_format.MessageToJson raised and silently
                # logged a None payload); matches the AddSplitPoints stub.
                request_payload = type(request).to_json(request)
            # Logging is best-effort: narrow to Exception so we never
            # swallow KeyboardInterrupt/SystemExit (was a bare `except:`).
            except Exception:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                "Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DropDatabase",
                extra={
                    "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
                    "rpcName": "DropDatabase",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = DatabaseAdminRestTransport._DropDatabase._get_response(
            self._host,
            metadata,
            query_params,
            self._session,
            timeout,
            transcoded_request,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)
+
+ class _GetBackup(
+ _BaseDatabaseAdminRestTransport._BaseGetBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.GetBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.Backup:
+ r"""Call the get backup method over HTTP.
+
+ Args:
+ request (~.backup.GetBackupRequest):
+ The request object. The request for
+ [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.Backup:
+ A backup of a Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_backup(request, metadata)
+ transcoded_request = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_transcoded_request(
+ http_options, request
+ )
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.Backup()
+ pb_resp = backup.Backup.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.Backup.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup_schedule.GetBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.BackupSchedule:
+ r"""Call the get backup schedule method over HTTP.
+
+ Args:
+ request (~.backup_schedule.GetBackupScheduleRequest):
+ The request object. The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup_schedule.BackupSchedule()
+ pb_resp = backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup_schedule.BackupSchedule.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetDatabase(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.Database:
+ r"""Call the get database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseRequest):
+ The request object. The request for
+ [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.Database:
+ A Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.Database()
+ pb_resp = spanner_database_admin.Database.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = spanner_database_admin.Database.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetDatabaseDdl(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetDatabaseDdl")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.GetDatabaseDdlRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.GetDatabaseDdlResponse:
+ r"""Call the get database ddl method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.GetDatabaseDdlRequest):
+ The request object. The request for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.GetDatabaseDdlResponse:
+ The response for
+ [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_database_ddl(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabaseDdl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetDatabaseDdl._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.GetDatabaseDdlResponse()
+ pb_resp = spanner_database_admin.GetDatabaseDdlResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_database_ddl(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_database_ddl_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.GetDatabaseDdlResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database_ddl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetDatabaseDdl",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _GetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetIamPolicy")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the get iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.GetIamPolicyRequest):
+ The request object. Request message for ``GetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+                conditions in their IAM policies, see the `IAM
+                documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+                For a description of IAM and its features, see the `IAM
+                documentation <https://cloud.google.com/iam/docs/>`__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _InternalUpdateGraphOperation(
+ _BaseDatabaseAdminRestTransport._BaseInternalUpdateGraphOperation,
+ DatabaseAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.InternalUpdateGraphOperation")
+
+ def __call__(
+ self,
+ request: spanner_database_admin.InternalUpdateGraphOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.InternalUpdateGraphOperationResponse:
+ raise NotImplementedError(
+ "Method InternalUpdateGraphOperation is not available over REST transport"
+ )
+
+ class _ListBackupOperations(
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackupOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.ListBackupOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.ListBackupOperationsResponse:
+ r"""Call the list backup operations method over HTTP.
+
+ Args:
+ request (~.backup.ListBackupOperationsRequest):
+ The request object. The request for
+ [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.ListBackupOperationsResponse:
+ The response for
+ [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backup_operations(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackupOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.ListBackupOperationsResponse()
+ pb_resp = backup.ListBackupOperationsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backup_operations(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backup_operations_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.ListBackupOperationsResponse.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_operations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupOperations",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListBackups(
+ _BaseDatabaseAdminRestTransport._BaseListBackups, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackups")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup.ListBackupsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup.ListBackupsResponse:
+ r"""Call the list backups method over HTTP.
+
+ Args:
+ request (~.backup.ListBackupsRequest):
+ The request object. The request for
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup.ListBackupsResponse:
+ The response for
+ [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backups(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackups._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = (
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_query_params_json(
+ transcoded_request
+ )
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackups",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackups",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackups._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup.ListBackupsResponse()
+ pb_resp = backup.ListBackupsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backups(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backups_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = backup.ListBackupsResponse.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backups",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackups",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListBackupSchedules(
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListBackupSchedules")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: backup_schedule.ListBackupSchedulesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> backup_schedule.ListBackupSchedulesResponse:
+ r"""Call the list backup schedules method over HTTP.
+
+ Args:
+ request (~.backup_schedule.ListBackupSchedulesRequest):
+ The request object. The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.backup_schedule.ListBackupSchedulesResponse:
+ The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_backup_schedules(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupSchedules",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupSchedules",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListBackupSchedules._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = backup_schedule.ListBackupSchedulesResponse()
+ pb_resp = backup_schedule.ListBackupSchedulesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_backup_schedules(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_backup_schedules_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ backup_schedule.ListBackupSchedulesResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_schedules",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListBackupSchedules",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabaseOperations(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations,
+ DatabaseAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabaseOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabaseOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabaseOperationsResponse:
+ r"""Call the list database operations method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabaseOperationsRequest):
+ The request object. The request for
+ [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabaseOperationsResponse:
+ The response for
+ [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_database_operations(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabaseOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabaseOperationsResponse()
+ pb_resp = spanner_database_admin.ListDatabaseOperationsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_database_operations(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_database_operations_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabaseOperationsResponse.to_json(
+ response
+ )
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_operations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseOperations",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabaseRoles(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabaseRoles")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabaseRolesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabaseRolesResponse:
+ r"""Call the list database roles method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabaseRolesRequest):
+ The request object. The request for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabaseRolesResponse:
+ The response for
+ [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_database_roles(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseRoles",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseRoles",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabaseRoles._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabaseRolesResponse()
+ pb_resp = spanner_database_admin.ListDatabaseRolesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_database_roles(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_database_roles_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabaseRolesResponse.to_json(
+ response
+ )
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_roles",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabaseRoles",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _ListDatabases(
+ _BaseDatabaseAdminRestTransport._BaseListDatabases, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListDatabases")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.ListDatabasesRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_database_admin.ListDatabasesResponse:
+ r"""Call the list databases method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.ListDatabasesRequest):
+ The request object. The request for
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.spanner_database_admin.ListDatabasesResponse:
+ The response for
+ [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListDatabases._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_databases(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabases",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabases",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListDatabases._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = spanner_database_admin.ListDatabasesResponse()
+ pb_resp = spanner_database_admin.ListDatabasesResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_databases(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_databases_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ spanner_database_admin.ListDatabasesResponse.to_json(response)
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_databases",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListDatabases",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _RestoreDatabase(
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.RestoreDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.RestoreDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the restore database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.RestoreDatabaseRequest):
+ The request object. The request for
+ [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_restore_database(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.RestoreDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "RestoreDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._RestoreDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_restore_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_restore_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.restore_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "RestoreDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _SetIamPolicy(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.SetIamPolicy")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Call the set iam policy method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.SetIamPolicyRequest):
+ The request object. Request message for ``SetIamPolicy`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which
+ specifies access controls for Google Cloud resources.
+
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members``, or
+ principals, to a single ``role``. Principals can be user
+ accounts, service accounts, Google groups, and domains
+ (such as G Suite). A ``role`` is a named list of
+ permissions; each ``role`` can be an IAM predefined role
+ or a user-created custom role.
+
+ For some types of Google Cloud resources, a ``binding``
+ can also specify a ``condition``, which is a logical
+ expression that allows access to a resource only if the
+ expression evaluates to ``true``. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the `IAM
+ documentation `__.
+
+ **JSON example:**
+
+ ::
+
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
+
+ **YAML example:**
+
+ ::
+
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
+
+ For a description of IAM and its features, see the `IAM
+ documentation `__.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.SetIamPolicy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._SetIamPolicy._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = policy_pb2.Policy()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_set_iam_policy(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_set_iam_policy_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.set_iam_policy",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "SetIamPolicy",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _TestIamPermissions(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.TestIamPermissions")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: iam_policy_pb2.TestIamPermissionsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ r"""Call the test iam permissions method over HTTP.
+
+ Args:
+ request (~.iam_policy_pb2.TestIamPermissionsRequest):
+ The request object. Request message for ``TestIamPermissions`` method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.iam_policy_pb2.TestIamPermissionsResponse:
+ Response message for ``TestIamPermissions`` method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_test_iam_permissions(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.TestIamPermissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._TestIamPermissions._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = iam_policy_pb2.TestIamPermissionsResponse()
+ pb_resp = resp
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_test_iam_permissions(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_test_iam_permissions_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.test_iam_permissions",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "TestIamPermissions",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackup(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateBackup")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup.UpdateBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup.Backup:
+ r"""Call the update backup method over HTTP.
+
+ Args:
+ request (~.gsad_backup.UpdateBackupRequest):
+ The request object. The request for
+ [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup.Backup:
+ A backup of a Cloud Spanner database.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackup._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup.Backup()
+ pb_resp = gsad_backup.Backup.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup.Backup.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackup",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateBackupSchedule(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateBackupSchedule")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: gsad_backup_schedule.UpdateBackupScheduleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> gsad_backup_schedule.BackupSchedule:
+ r"""Call the update backup schedule method over HTTP.
+
+ Args:
+ request (~.gsad_backup_schedule.UpdateBackupScheduleRequest):
+ The request object. The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.gsad_backup_schedule.BackupSchedule:
+ BackupSchedule expresses the
+ automated backup creation specification
+ for a Spanner database. Next ID: 10
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_backup_schedule(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackupSchedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateBackupSchedule._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gsad_backup_schedule.BackupSchedule()
+ pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_backup_schedule(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_backup_schedule_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gsad_backup_schedule.BackupSchedule.to_json(
+ response
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup_schedule",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateBackupSchedule",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateDatabase(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateDatabase")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.UpdateDatabaseRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the update database method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.UpdateDatabaseRequest):
+ The request object. The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_database(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabase",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabase",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateDatabase._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_database(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_database_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabase",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _UpdateDatabaseDdl(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.UpdateDatabaseDdl")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: spanner_database_admin.UpdateDatabaseDdlRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the update database ddl method over HTTP.
+
+ Args:
+ request (~.spanner_database_admin.UpdateDatabaseDdlRequest):
+ The request object. Enqueues the given DDL statements to be applied, in
+ order but not necessarily all at once, to the database
+ schema at some point (or points) in the future. The
+ server checks that the statements are executable
+ (syntactically valid, name tables that exist, etc.)
+ before enqueueing them, but they may still fail upon
+ later execution (e.g., if a statement from another batch
+ of statements is applied first and it conflicts in some
+ way, or if there is some data-related problem like a
+ ``NULL`` value in a column to which ``NOT NULL`` would
+ be added). If a statement fails, all subsequent
+ statements in the batch are automatically cancelled.
+
+ Each batch of statements is assigned a name which can be
+ used with the
+ [Operations][google.longrunning.Operations] API to
+ monitor progress. See the
+ [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
+ field for more details.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_database_ddl(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabaseDdl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabaseDdl",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._UpdateDatabaseDdl._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_database_ddl(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_database_ddl_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database_ddl",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "UpdateDatabaseDdl",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
    # Each property below exposes one RPC as a callable by instantiating the
    # matching REST stub over this transport's session, host and interceptor.
    # The ``# type: ignore`` casts are needed because mypy cannot infer that
    # each stub's ``__call__`` satisfies the declared Callable signature.

    @property
    def add_split_points(
        self,
    ) -> Callable[
        [spanner_database_admin.AddSplitPointsRequest],
        spanner_database_admin.AddSplitPointsResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._AddSplitPoints(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def copy_backup(
        self,
    ) -> Callable[[backup.CopyBackupRequest], operations_pb2.Operation]:
        # mypy can't see that the stub implements this Callable signature.
        return self._CopyBackup(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def create_backup(
        self,
    ) -> Callable[[gsad_backup.CreateBackupRequest], operations_pb2.Operation]:
        # mypy can't see that the stub implements this Callable signature.
        return self._CreateBackup(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def create_backup_schedule(
        self,
    ) -> Callable[
        [gsad_backup_schedule.CreateBackupScheduleRequest],
        gsad_backup_schedule.BackupSchedule,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._CreateBackupSchedule(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def create_database(
        self,
    ) -> Callable[
        [spanner_database_admin.CreateDatabaseRequest], operations_pb2.Operation
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._CreateDatabase(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empty]:
        # mypy can't see that the stub implements this Callable signature.
        return self._DeleteBackup(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def delete_backup_schedule(
        self,
    ) -> Callable[[backup_schedule.DeleteBackupScheduleRequest], empty_pb2.Empty]:
        # mypy can't see that the stub implements this Callable signature.
        return self._DeleteBackupSchedule(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def drop_database(
        self,
    ) -> Callable[[spanner_database_admin.DropDatabaseRequest], empty_pb2.Empty]:
        # mypy can't see that the stub implements this Callable signature.
        return self._DropDatabase(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]:
        # mypy can't see that the stub implements this Callable signature.
        return self._GetBackup(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def get_backup_schedule(
        self,
    ) -> Callable[
        [backup_schedule.GetBackupScheduleRequest], backup_schedule.BackupSchedule
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._GetBackupSchedule(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def get_database(
        self,
    ) -> Callable[
        [spanner_database_admin.GetDatabaseRequest], spanner_database_admin.Database
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._GetDatabase(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def get_database_ddl(
        self,
    ) -> Callable[
        [spanner_database_admin.GetDatabaseDdlRequest],
        spanner_database_admin.GetDatabaseDdlResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._GetDatabaseDdl(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def get_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
        # mypy can't see that the stub implements this Callable signature.
        return self._GetIamPolicy(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def internal_update_graph_operation(
        self,
    ) -> Callable[
        [spanner_database_admin.InternalUpdateGraphOperationRequest],
        spanner_database_admin.InternalUpdateGraphOperationResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._InternalUpdateGraphOperation(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_backup_operations(
        self,
    ) -> Callable[
        [backup.ListBackupOperationsRequest], backup.ListBackupOperationsResponse
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListBackupOperations(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_backups(
        self,
    ) -> Callable[[backup.ListBackupsRequest], backup.ListBackupsResponse]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListBackups(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_backup_schedules(
        self,
    ) -> Callable[
        [backup_schedule.ListBackupSchedulesRequest],
        backup_schedule.ListBackupSchedulesResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListBackupSchedules(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_database_operations(
        self,
    ) -> Callable[
        [spanner_database_admin.ListDatabaseOperationsRequest],
        spanner_database_admin.ListDatabaseOperationsResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListDatabaseOperations(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_database_roles(
        self,
    ) -> Callable[
        [spanner_database_admin.ListDatabaseRolesRequest],
        spanner_database_admin.ListDatabaseRolesResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListDatabaseRoles(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def list_databases(
        self,
    ) -> Callable[
        [spanner_database_admin.ListDatabasesRequest],
        spanner_database_admin.ListDatabasesResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._ListDatabases(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def restore_database(
        self,
    ) -> Callable[
        [spanner_database_admin.RestoreDatabaseRequest], operations_pb2.Operation
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._RestoreDatabase(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def set_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
        # mypy can't see that the stub implements this Callable signature.
        return self._SetIamPolicy(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        iam_policy_pb2.TestIamPermissionsResponse,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._TestIamPermissions(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def update_backup(
        self,
    ) -> Callable[[gsad_backup.UpdateBackupRequest], gsad_backup.Backup]:
        # mypy can't see that the stub implements this Callable signature.
        return self._UpdateBackup(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def update_backup_schedule(
        self,
    ) -> Callable[
        [gsad_backup_schedule.UpdateBackupScheduleRequest],
        gsad_backup_schedule.BackupSchedule,
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._UpdateBackupSchedule(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def update_database(
        self,
    ) -> Callable[
        [spanner_database_admin.UpdateDatabaseRequest], operations_pb2.Operation
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._UpdateDatabase(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def update_database_ddl(
        self,
    ) -> Callable[
        [spanner_database_admin.UpdateDatabaseDdlRequest], operations_pb2.Operation
    ]:
        # mypy can't see that the stub implements this Callable signature.
        return self._UpdateDatabaseDdl(self._session, self._host, self._interceptor)  # type: ignore

    @property
    def cancel_operation(self):
        # Mixin operation: cancel a long-running operation on this transport.
        return self._CancelOperation(self._session, self._host, self._interceptor)  # type: ignore
+
+ class _CancelOperation(
+ _BaseDatabaseAdminRestTransport._BaseCancelOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.CancelOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.CancelOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Call the cancel operation method over HTTP.
+
+ Args:
+ request (operations_pb2.CancelOperationRequest):
+ The request object for CancelOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_cancel_operation(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CancelOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "CancelOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._CancelOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ return self._interceptor.post_cancel_operation(None)
+
+ @property
+ def delete_operation(self):
+ return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _DeleteOperation(
+ _BaseDatabaseAdminRestTransport._BaseDeleteOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.DeleteOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.DeleteOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Call the delete operation method over HTTP.
+
+ Args:
+ request (operations_pb2.DeleteOperationRequest):
+ The request object for DeleteOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_delete_operation(
+ request, metadata
+ )
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "DeleteOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._DeleteOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ return self._interceptor.post_delete_operation(None)
+
+ @property
+ def get_operation(self):
+ return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
+
+ class _GetOperation(
+ _BaseDatabaseAdminRestTransport._BaseGetOperation, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.GetOperation")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.GetOperationRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the get operation method over HTTP.
+
+ Args:
+ request (operations_pb2.GetOperationRequest):
+ The request object for GetOperation method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.Operation: Response from GetOperation method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseGetOperation._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_operation(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._GetOperation._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.Operation()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_get_operation(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.GetOperation",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "GetOperation",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def list_operations(self):
+ return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
+
+ class _ListOperations(
+ _BaseDatabaseAdminRestTransport._BaseListOperations, DatabaseAdminRestStub
+ ):
+ def __hash__(self):
+ return hash("DatabaseAdminRestTransport.ListOperations")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: operations_pb2.ListOperationsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.ListOperationsResponse:
+ r"""Call the list operations method over HTTP.
+
+ Args:
+ request (operations_pb2.ListOperationsRequest):
+ The request object for ListOperations method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ operations_pb2.ListOperationsResponse: Response from ListOperations method.
+ """
+
+ http_options = (
+ _BaseDatabaseAdminRestTransport._BaseListOperations._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_operations(request, metadata)
+ transcoded_request = _BaseDatabaseAdminRestTransport._BaseListOperations._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseDatabaseAdminRestTransport._BaseListOperations._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = DatabaseAdminRestTransport._ListOperations._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ content = response.content.decode("utf-8")
+ resp = operations_pb2.ListOperationsResponse()
+ resp = json_format.Parse(content, resp)
+ resp = self._interceptor.post_list_operations(resp)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.ListOperations",
+ extra={
+ "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin",
+ "rpcName": "ListOperations",
+ "httpResponse": http_response,
+ "metadata": http_response["headers"],
+ },
+ )
+ return resp
+
+ @property
+ def kind(self) -> str:
+ return "rest"
+
+ def close(self):
+ self._session.close()
+
+
+__all__ = ("DatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
new file mode 100644
index 0000000000..d0ee0a2cbb
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py
@@ -0,0 +1,1654 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json # type: ignore
+from google.api_core import path_template
+from google.api_core import gapic_v1
+
+from google.protobuf import json_format
+from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
+
+import re
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
+from google.cloud.spanner_admin_database_v1.types import backup_schedule
+from google.cloud.spanner_admin_database_v1.types import (
+ backup_schedule as gsad_backup_schedule,
+)
+from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+
+
+class _BaseDatabaseAdminRestTransport(DatabaseAdminTransport):
+ """Base REST backend transport for DatabaseAdmin.
+
+ Note: This class is not meant to be used directly. Use its sync and
+ async sub-classes instead.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends JSON representations of protocol buffers over HTTP/1.1
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "spanner.googleapis.com",
+ credentials: Optional[Any] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ url_scheme: str = "https",
+ api_audience: Optional[str] = None,
+ ) -> None:
+ """Instantiate the transport.
+ Args:
+ host (Optional[str]):
+ The hostname to connect to (default: 'spanner.googleapis.com').
+ credentials (Optional[Any]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ """
+ # Run the base constructor
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(
+ f"Unexpected hostname structure: {host}"
+ ) # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
+ )
+
+ class _BaseAddSplitPoints:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.AddSplitPointsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCopyBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups:copy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.CopyBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.CreateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "backupScheduleId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.CreateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCreateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.CreateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.DeleteBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDeleteBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.DeleteBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseDropDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.DropDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.GetBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.GetBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.GetDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseGetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseInternalUpdateGraphOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ class _BaseListBackupOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backupOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupOperationsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackups:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/backups",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup.ListBackupsRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackups._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListBackupSchedules:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = backup_schedule.ListBackupSchedulesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databaseOperations",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseOperationsRequest.pb(
+ request
+ )
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabaseRoles:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabaseRolesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseListDatabases:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.ListDatabasesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseListDatabases._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseRestoreDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{parent=projects/*/instances/*}/databases:restore",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.RestoreDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseSetIamPolicy:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseTestIamPermissions:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = request
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackup:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup.name=projects/*/instances/*/backups/*}",
+ "body": "backup",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup.UpdateBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateBackupSchedule:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}",
+ "body": "backup_schedule",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = gsad_backup_schedule.UpdateBackupScheduleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabase:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "updateMask": {},
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database.name=projects/*/instances/*/databases/*}",
+ "body": "database",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseUpdateDatabaseDdl:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v1/{database=projects/*/instances/*/databases/*}/ddl",
+ "body": "*",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = spanner_database_admin.UpdateDatabaseDdlRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
+ class _BaseCancelOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}:cancel",
+ },
+ {
+ "method": "post",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}:cancel",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseDeleteOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "delete",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseGetOperation:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations/*}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+ class _BaseListOperations:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/databases/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instances/*/backups/*/operations}",
+ },
+ {
+ "method": "get",
+ "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ request_kwargs = json_format.MessageToDict(request)
+ transcoded_request = path_template.transcode(http_options, **request_kwargs)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(json.dumps(transcoded_request["query_params"]))
+ return query_params
+
+
# Public API of this module: only the base REST transport class is exported.
__all__ = ("_BaseDatabaseAdminRestTransport",)
diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py
index 9552559efa..ca79ddec90 100644
--- a/google/cloud/spanner_admin_database_v1/types/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/types/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
from .backup import (
Backup,
BackupInfo,
+ BackupInstancePartition,
CopyBackupEncryptionConfig,
CopyBackupMetadata,
CopyBackupRequest,
@@ -23,13 +24,26 @@
CreateBackupMetadata,
CreateBackupRequest,
DeleteBackupRequest,
+ FullBackupSpec,
GetBackupRequest,
+ IncrementalBackupSpec,
ListBackupOperationsRequest,
ListBackupOperationsResponse,
ListBackupsRequest,
ListBackupsResponse,
UpdateBackupRequest,
)
+from .backup_schedule import (
+ BackupSchedule,
+ BackupScheduleSpec,
+ CreateBackupScheduleRequest,
+ CrontabSpec,
+ DeleteBackupScheduleRequest,
+ GetBackupScheduleRequest,
+ ListBackupSchedulesRequest,
+ ListBackupSchedulesResponse,
+ UpdateBackupScheduleRequest,
+)
from .common import (
EncryptionConfig,
EncryptionInfo,
@@ -37,14 +51,19 @@
DatabaseDialect,
)
from .spanner_database_admin import (
+ AddSplitPointsRequest,
+ AddSplitPointsResponse,
CreateDatabaseMetadata,
CreateDatabaseRequest,
Database,
DatabaseRole,
+ DdlStatementActionInfo,
DropDatabaseRequest,
GetDatabaseDdlRequest,
GetDatabaseDdlResponse,
GetDatabaseRequest,
+ InternalUpdateGraphOperationRequest,
+ InternalUpdateGraphOperationResponse,
ListDatabaseOperationsRequest,
ListDatabaseOperationsResponse,
ListDatabaseRolesRequest,
@@ -56,14 +75,18 @@
RestoreDatabaseMetadata,
RestoreDatabaseRequest,
RestoreInfo,
+ SplitPoints,
UpdateDatabaseDdlMetadata,
UpdateDatabaseDdlRequest,
+ UpdateDatabaseMetadata,
+ UpdateDatabaseRequest,
RestoreSourceType,
)
__all__ = (
"Backup",
"BackupInfo",
+ "BackupInstancePartition",
"CopyBackupEncryptionConfig",
"CopyBackupMetadata",
"CopyBackupRequest",
@@ -71,24 +94,40 @@
"CreateBackupMetadata",
"CreateBackupRequest",
"DeleteBackupRequest",
+ "FullBackupSpec",
"GetBackupRequest",
+ "IncrementalBackupSpec",
"ListBackupOperationsRequest",
"ListBackupOperationsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
"UpdateBackupRequest",
+ "BackupSchedule",
+ "BackupScheduleSpec",
+ "CreateBackupScheduleRequest",
+ "CrontabSpec",
+ "DeleteBackupScheduleRequest",
+ "GetBackupScheduleRequest",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
+ "UpdateBackupScheduleRequest",
"EncryptionConfig",
"EncryptionInfo",
"OperationProgress",
"DatabaseDialect",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
"CreateDatabaseMetadata",
"CreateDatabaseRequest",
"Database",
"DatabaseRole",
+ "DdlStatementActionInfo",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
"GetDatabaseDdlResponse",
"GetDatabaseRequest",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
"ListDatabaseOperationsRequest",
"ListDatabaseOperationsResponse",
"ListDatabaseRolesRequest",
@@ -100,7 +139,10 @@
"RestoreDatabaseMetadata",
"RestoreDatabaseRequest",
"RestoreInfo",
+ "SplitPoints",
"UpdateDatabaseDdlMetadata",
"UpdateDatabaseDdlRequest",
+ "UpdateDatabaseMetadata",
+ "UpdateDatabaseRequest",
"RestoreSourceType",
)
diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py
index dd42c409b9..da236fb4ff 100644
--- a/google/cloud/spanner_admin_database_v1/types/backup.py
+++ b/google/cloud/spanner_admin_database_v1/types/backup.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import common
@@ -39,6 +43,9 @@
"BackupInfo",
"CreateBackupEncryptionConfig",
"CopyBackupEncryptionConfig",
+ "FullBackupSpec",
+ "IncrementalBackupSpec",
+ "BackupInstancePartition",
},
)
@@ -93,9 +100,33 @@ class Backup(proto.Message):
equivalent to the ``create_time``.
size_bytes (int):
Output only. Size of the backup in bytes.
+ freeable_size_bytes (int):
+ Output only. The number of bytes that will be
+ freed by deleting this backup. This value will
+ be zero if, for example, this backup is part of
+ an incremental backup chain and younger backups
+ in the chain require that we keep its data. For
+ backups not in an incremental backup chain, this
+ is always the size of the backup. This value may
+ change if backups on the same chain get created,
+ deleted or expired.
+ exclusive_size_bytes (int):
+ Output only. For a backup in an incremental
+ backup chain, this is the storage space needed
+ to keep the data that has changed since the
+ previous backup. For all other backups, this is
+ always the size of the backup. This value may
+ change if backups on the same chain get deleted
+ or expired.
+
+ This field can be used to calculate the total
+ storage space used by a set of backups. For
+ example, the total space used by all backups of
+ a database can be computed by summing up this
+ field.
state (google.cloud.spanner_admin_database_v1.types.Backup.State):
Output only. The current state of the backup.
- referencing_databases (Sequence[str]):
+ referencing_databases (MutableSequence[str]):
Output only. The names of the restored databases that
reference the backup. The database names are of the form
``projects//instances//databases/``.
@@ -107,10 +138,20 @@ class Backup(proto.Message):
encryption_info (google.cloud.spanner_admin_database_v1.types.EncryptionInfo):
Output only. The encryption information for
the backup.
+ encryption_information (MutableSequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. The encryption information for the backup,
+ whether it is protected by one or more KMS keys. The
+ information includes all Cloud KMS key versions used to
+ encrypt the backup. The
+            ``encryption_status`` field inside of each ``EncryptionInfo``
+ is not populated. At least one of the key versions must be
+ available for the backup to be restored. If a key version is
+ revoked in the middle of a restore, the restore behavior is
+ undefined.
database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
Output only. The database dialect information
for the backup.
- referencing_backups (Sequence[str]):
+ referencing_backups (MutableSequence[str]):
Output only. The names of the destination backups being
created by copying this source backup. The backup names are
of the form
@@ -127,69 +168,152 @@ class Backup(proto.Message):
UpdateBackup, CopyBackup. When updating or copying an
existing backup, the expiration time specified must be less
than ``Backup.max_expire_time``.
+ backup_schedules (MutableSequence[str]):
+ Output only. List of backup schedule URIs
+ that are associated with creating this backup.
+ This is only applicable for scheduled backups,
+ and is empty for on-demand backups.
+
+ To optimize for storage, whenever possible,
+ multiple schedules are collapsed together to
+ create one backup. In such cases, this field
+ captures the list of all backup schedule URIs
+ that are associated with creating this backup.
+ If collapsing is not done, then this field
+ captures the single backup schedule URI
+ associated with creating this backup.
+ incremental_backup_chain_id (str):
+ Output only. Populated only for backups in an incremental
+ backup chain. Backups share the same chain id if and only if
+ they belong to the same incremental backup chain. Use this
+ field to determine which backups are part of the same
+ incremental backup chain. The ordering of backups in the
+ chain can be determined by ordering the backup
+ ``version_time``.
+ oldest_version_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Data deleted at a time older
+ than this is guaranteed not to be retained in
+ order to support this backup. For a backup in an
+ incremental backup chain, this is the version
+ time of the oldest backup that exists or ever
+ existed in the chain. For all other backups,
+ this is the version time of the backup. This
+ field can be used to understand what data is
+ being retained by the backup system.
+ instance_partitions (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupInstancePartition]):
+ Output only. The instance partition(s) storing the backup.
+
+ This is the same as the list of the instance partition(s)
+ that the database had footprint in at the backup's
+ ``version_time``.
"""
class State(proto.Enum):
- r"""Indicates the current state of the backup."""
+ r"""Indicates the current state of the backup.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The pending backup is still being created. Operations on the
+ backup may fail with ``FAILED_PRECONDITION`` in this state.
+ READY (2):
+ The backup is complete and ready for use.
+ """
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=2,
)
- version_time = proto.Field(
+ version_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
- expire_time = proto.Field(
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- create_time = proto.Field(
+ create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
- size_bytes = proto.Field(
+ size_bytes: int = proto.Field(
proto.INT64,
number=5,
)
- state = proto.Field(
+ freeable_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=15,
+ )
+ exclusive_size_bytes: int = proto.Field(
+ proto.INT64,
+ number=16,
+ )
+ state: State = proto.Field(
proto.ENUM,
number=6,
enum=State,
)
- referencing_databases = proto.RepeatedField(
+ referencing_databases: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=7,
)
- encryption_info = proto.Field(
+ encryption_info: common.EncryptionInfo = proto.Field(
proto.MESSAGE,
number=8,
message=common.EncryptionInfo,
)
- database_dialect = proto.Field(
+ encryption_information: MutableSequence[
+ common.EncryptionInfo
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=13,
+ message=common.EncryptionInfo,
+ )
+ database_dialect: common.DatabaseDialect = proto.Field(
proto.ENUM,
number=10,
enum=common.DatabaseDialect,
)
- referencing_backups = proto.RepeatedField(
+ referencing_backups: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=11,
)
- max_expire_time = proto.Field(
+ max_expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=12,
message=timestamp_pb2.Timestamp,
)
+ backup_schedules: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=14,
+ )
+ incremental_backup_chain_id: str = proto.Field(
+ proto.STRING,
+ number=17,
+ )
+ oldest_version_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=18,
+ message=timestamp_pb2.Timestamp,
+ )
+ instance_partitions: MutableSequence[
+ "BackupInstancePartition"
+ ] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=19,
+ message="BackupInstancePartition",
+ )
class CreateBackupRequest(proto.Message):
@@ -220,20 +344,20 @@ class CreateBackupRequest(proto.Message):
= ``USE_DATABASE_ENCRYPTION``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- backup_id = proto.Field(
+ backup_id: str = proto.Field(
proto.STRING,
number=2,
)
- backup = proto.Field(
+ backup: "Backup" = proto.Field(
proto.MESSAGE,
number=3,
message="Backup",
)
- encryption_config = proto.Field(
+ encryption_config: "CreateBackupEncryptionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="CreateBackupEncryptionConfig",
@@ -271,20 +395,20 @@ class CreateBackupMetadata(proto.Message):
1, corresponding to ``Code.CANCELLED``.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=2,
)
- progress = proto.Field(
+ progress: common.OperationProgress = proto.Field(
proto.MESSAGE,
number=3,
message=common.OperationProgress,
)
- cancel_time = proto.Field(
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
@@ -327,24 +451,24 @@ class CopyBackupRequest(proto.Message):
= ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- backup_id = proto.Field(
+ backup_id: str = proto.Field(
proto.STRING,
number=2,
)
- source_backup = proto.Field(
+ source_backup: str = proto.Field(
proto.STRING,
number=3,
)
- expire_time = proto.Field(
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
- encryption_config = proto.Field(
+ encryption_config: "CopyBackupEncryptionConfig" = proto.Field(
proto.MESSAGE,
number=5,
message="CopyBackupEncryptionConfig",
@@ -352,7 +476,7 @@ class CopyBackupRequest(proto.Message):
class CopyBackupMetadata(proto.Message):
- r"""Metadata type for the google.longrunning.Operation returned by
+ r"""Metadata type for the operation returned by
[CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
Attributes:
@@ -385,20 +509,20 @@ class CopyBackupMetadata(proto.Message):
1, corresponding to ``Code.CANCELLED``.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- source_backup = proto.Field(
+ source_backup: str = proto.Field(
proto.STRING,
number=2,
)
- progress = proto.Field(
+ progress: common.OperationProgress = proto.Field(
proto.MESSAGE,
number=3,
message=common.OperationProgress,
)
- cancel_time = proto.Field(
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
@@ -416,7 +540,7 @@ class UpdateBackupRequest(proto.Message):
required. Other fields are ignored. Update is only supported
for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A mask specifying which fields (e.g.
``expire_time``) in the Backup resource should be updated.
@@ -426,12 +550,12 @@ class UpdateBackupRequest(proto.Message):
accidentally by clients that do not know about them.
"""
- backup = proto.Field(
+ backup: "Backup" = proto.Field(
proto.MESSAGE,
number=1,
message="Backup",
)
- update_mask = proto.Field(
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
@@ -448,7 +572,7 @@ class GetBackupRequest(proto.Message):
``projects//instances//backups/``.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -465,7 +589,7 @@ class DeleteBackupRequest(proto.Message):
``projects//instances//backups/``.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -493,16 +617,17 @@ class ListBackupsRequest(proto.Message):
[Backup][google.spanner.admin.database.v1.Backup] are
eligible for filtering:
- - ``name``
- - ``database``
- - ``state``
- - ``create_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``expire_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``version_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``size_bytes``
+ - ``name``
+ - ``database``
+ - ``state``
+ - ``create_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``expire_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``version_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``size_bytes``
+ - ``backup_schedules``
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -511,21 +636,23 @@ class ListBackupsRequest(proto.Message):
Here are a few examples:
- - ``name:Howl`` - The backup's name contains the string
- "howl".
- - ``database:prod`` - The database's name contains the
- string "prod".
- - ``state:CREATING`` - The backup is pending creation.
- - ``state:READY`` - The backup is fully created and ready
- for use.
- - ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")``
- - The backup name contains the string "howl" and
- ``create_time`` of the backup is before
- 2018-03-28T14:50:00Z.
- - ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
- ``expire_time`` is before 2018-03-28T14:50:00Z.
- - ``size_bytes > 10000000000`` - The backup's size is
- greater than 10GB
+ - ``name:Howl`` - The backup's name contains the string
+ "howl".
+ - ``database:prod`` - The database's name contains the
+ string "prod".
+ - ``state:CREATING`` - The backup is pending creation.
+ - ``state:READY`` - The backup is fully created and ready
+ for use.
+ - ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")``
+ - The backup name contains the string "howl" and
+ ``create_time`` of the backup is before
+ 2018-03-28T14:50:00Z.
+ - ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup
+ ``expire_time`` is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` - The backup's size is
+ greater than 10GB
+ - ``backup_schedules:daily`` - The backup is created from a
+ schedule with "daily" in its name.
page_size (int):
Number of backups to be returned in the
response. If 0 or less, defaults to the server's
@@ -538,19 +665,19 @@ class ListBackupsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- filter = proto.Field(
+ filter: str = proto.Field(
proto.STRING,
number=2,
)
- page_size = proto.Field(
+ page_size: int = proto.Field(
proto.INT32,
number=3,
)
- page_token = proto.Field(
+ page_token: str = proto.Field(
proto.STRING,
number=4,
)
@@ -561,7 +688,7 @@ class ListBackupsResponse(proto.Message):
[ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
Attributes:
- backups (Sequence[google.cloud.spanner_admin_database_v1.types.Backup]):
+ backups (MutableSequence[google.cloud.spanner_admin_database_v1.types.Backup]):
The list of matching backups. Backups returned are ordered
by ``create_time`` in descending order, starting from the
most recent ``create_time``.
@@ -575,12 +702,12 @@ class ListBackupsResponse(proto.Message):
def raw_page(self):
return self
- backups = proto.RepeatedField(
+ backups: MutableSequence["Backup"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Backup",
)
- next_page_token = proto.Field(
+ next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
@@ -609,21 +736,21 @@ class ListBackupOperationsRequest(proto.Message):
[operation][google.longrunning.Operation] are eligible for
filtering:
- - ``name`` - The name of the long-running operation
- - ``done`` - False if the operation is in progress, else
- true.
- - ``metadata.@type`` - the type of metadata. For example,
- the type string for
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- is
- ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``.
- - ``metadata.`` - any field in metadata.value.
- ``metadata.@type`` must be specified first if filtering
- on metadata fields.
- - ``error`` - Error associated with the long-running
- operation.
- - ``response.@type`` - the type of response.
- - ``response.`` - any field in response.value.
+ - ``name`` - The name of the long-running operation
+ - ``done`` - False if the operation is in progress, else
+ true.
+ - ``metadata.@type`` - the type of metadata. For example,
+ the type string for
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ is
+ ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``.
+ - ``metadata.`` - any field in metadata.value.
+ ``metadata.@type`` must be specified first if filtering on
+ metadata fields.
+ - ``error`` - Error associated with the long-running
+ operation.
+ - ``response.@type`` - the type of response.
+ - ``response.`` - any field in response.value.
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -632,56 +759,55 @@ class ListBackupOperationsRequest(proto.Message):
Here are a few examples:
- - ``done:true`` - The operation is complete.
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
- ``metadata.database:prod`` - Returns operations where:
-
- - The operation's metadata type is
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- - The database the backup was taken from has a name
- containing the string "prod".
-
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
- ``(metadata.name:howl) AND``
- ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
- ``(error:*)`` - Returns operations where:
-
- - The operation's metadata type is
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- - The backup name contains the string "howl".
- - The operation started before 2018-03-28T14:50:00Z.
- - The operation resulted in an error.
-
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
- ``(metadata.source_backup:test) AND``
- ``(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND``
- ``(error:*)`` - Returns operations where:
-
- - The operation's metadata type is
- [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
- - The source backup of the copied backup name contains
- the string "test".
- - The operation started before 2022-01-18T14:50:00Z.
- - The operation resulted in an error.
-
- - ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
- ``(metadata.database:test_db)) OR``
- ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
- ``(metadata.source_backup:test_bkp)) AND``
- ``(error:*)`` - Returns operations where:
-
- - The operation's metadata matches either of criteria:
-
- - The operation's metadata type is
- [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- AND the database the backup was taken from has name
- containing string "test_db"
- - The operation's metadata type is
- [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
- AND the backup the backup was copied from has name
- containing string "test_bkp"
-
- - The operation resulted in an error.
+ - ``done:true`` - The operation is complete.
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``metadata.database:prod`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ - The source database name of backup contains the string
+ "prod".
+
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``(metadata.name:howl) AND``
+ ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ - The backup name contains the string "howl".
+ - The operation started before 2018-03-28T14:50:00Z.
+ - The operation resulted in an error.
+
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
+ ``(metadata.source_backup:test) AND``
+ ``(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ - The source backup name contains the string "test".
+ - The operation started before 2022-01-18T14:50:00Z.
+ - The operation resulted in an error.
+
+ - ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND``
+ ``(metadata.database:test_db)) OR``
+ ``((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND``
+ ``(metadata.source_backup:test_bkp)) AND``
+ ``(error:*)`` - Returns operations where:
+
+ - The operation's metadata matches either of criteria:
+
+ - The operation's metadata type is
+ [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
+ AND the source database name of the backup contains
+ the string "test_db"
+ - The operation's metadata type is
+ [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
+ AND the source backup name contains the string
+ "test_bkp"
+
+ - The operation resulted in an error.
page_size (int):
Number of operations to be returned in the
response. If 0 or less, defaults to the server's
@@ -694,19 +820,19 @@ class ListBackupOperationsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- filter = proto.Field(
+ filter: str = proto.Field(
proto.STRING,
number=2,
)
- page_size = proto.Field(
+ page_size: int = proto.Field(
proto.INT32,
number=3,
)
- page_token = proto.Field(
+ page_token: str = proto.Field(
proto.STRING,
number=4,
)
@@ -717,7 +843,7 @@ class ListBackupOperationsResponse(proto.Message):
[ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
Attributes:
- operations (Sequence[google.longrunning.operations_pb2.Operation]):
+ operations (MutableSequence[google.longrunning.operations_pb2.Operation]):
The list of matching backup [long-running
operations][google.longrunning.Operation]. Each operation's
name will be prefixed by the backup's name. The operation's
@@ -739,12 +865,12 @@ class ListBackupOperationsResponse(proto.Message):
def raw_page(self):
return self
- operations = proto.RepeatedField(
+ operations: MutableSequence[operations_pb2.Operation] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=operations_pb2.Operation,
)
- next_page_token = proto.Field(
+ next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
@@ -773,21 +899,21 @@ class BackupInfo(proto.Message):
from.
"""
- backup = proto.Field(
+ backup: str = proto.Field(
proto.STRING,
number=1,
)
- version_time = proto.Field(
+ version_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
- create_time = proto.Field(
+ create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
- source_database = proto.Field(
+ source_database: str = proto.Field(
proto.STRING,
number=3,
)
@@ -805,24 +931,64 @@ class CreateBackupEncryptionConfig(proto.Message):
[encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to protect the backup. Values are of the form
+            ``projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the backup's instance configuration. Some
+ examples:
+
+ - For single region instance configs, specify a single
+ regional location KMS key.
+ - For multi-regional instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For an instance config of type USER_MANAGED, please
+ specify only regional location KMS keys to cover each
+ region in the instance config. Multi-regional location KMS
+ keys are not supported for USER_MANAGED instance configs.
"""
class EncryptionType(proto.Enum):
- r"""Encryption types for the backup."""
+ r"""Encryption types for the backup.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_DATABASE_ENCRYPTION (1):
+ Use the same encryption configuration as the database. This
+ is the default option when
+ [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
+ is empty. For example, if the database is using
+ ``Customer_Managed_Encryption``, the backup will be using
+ the same Cloud KMS key as the database.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified,
+ ``kms_key_name`` must contain a valid Cloud KMS key.
+ """
ENCRYPTION_TYPE_UNSPECIFIED = 0
USE_DATABASE_ENCRYPTION = 1
GOOGLE_DEFAULT_ENCRYPTION = 2
CUSTOMER_MANAGED_ENCRYPTION = 3
- encryption_type = proto.Field(
+ encryption_type: EncryptionType = proto.Field(
proto.ENUM,
number=1,
enum=EncryptionType,
)
- kms_key_name = proto.Field(
+ kms_key_name: str = proto.Field(
proto.STRING,
number=2,
)
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class CopyBackupEncryptionConfig(proto.Message):
@@ -837,24 +1003,102 @@ class CopyBackupEncryptionConfig(proto.Message):
[encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to protect the backup. Values are of the form
+            ``projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>``.
+            KMS keys specified can be in any order.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the backup's instance configuration. Some
+ examples:
+
+ - For single region instance configs, specify a single
+ regional location KMS key.
+ - For multi-regional instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For an instance config of type USER_MANAGED, please
+ specify only regional location KMS keys to cover each
+ region in the instance config. Multi-regional location KMS
+ keys are not supported for USER_MANAGED instance configs.
"""
class EncryptionType(proto.Enum):
- r"""Encryption types for the backup."""
+ r"""Encryption types for the backup.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION (1):
+ This is the default option for
+ [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ when
+ [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
+ is not specified. For example, if the source backup is using
+ ``Customer_Managed_Encryption``, the backup will be using
+ the same Cloud KMS key as the source backup.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified, either
+ ``kms_key_name`` or ``kms_key_names`` must contain valid
+ Cloud KMS key(s).
+ """
ENCRYPTION_TYPE_UNSPECIFIED = 0
USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1
GOOGLE_DEFAULT_ENCRYPTION = 2
CUSTOMER_MANAGED_ENCRYPTION = 3
- encryption_type = proto.Field(
+ encryption_type: EncryptionType = proto.Field(
proto.ENUM,
number=1,
enum=EncryptionType,
)
- kms_key_name = proto.Field(
+ kms_key_name: str = proto.Field(
proto.STRING,
number=2,
)
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
+
+
+class FullBackupSpec(proto.Message):
+ r"""The specification for full backups.
+ A full backup stores the entire contents of the database at a
+ given version time.
+
+ """
+
+
+class IncrementalBackupSpec(proto.Message):
+ r"""The specification for incremental backup chains.
+ An incremental backup stores the delta of changes between a
+ previous backup and the database contents at a given version
+ time. An incremental backup chain consists of a full backup and
+ zero or more successive incremental backups. The first backup
+ created for an incremental backup chain is always a full backup.
+
+ """
+
+
+class BackupInstancePartition(proto.Message):
+ r"""Instance partition information for the backup.
+
+ Attributes:
+ instance_partition (str):
+ A unique identifier for the instance partition. Values are
+ of the form
+            ``projects/<project>/instances/<instance>/instancePartitions/<instance_partition>``
+ """
+
+ instance_partition: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py
new file mode 100644
index 0000000000..2773c1ef63
--- /dev/null
+++ b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py
@@ -0,0 +1,369 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto # type: ignore
+
+from google.cloud.spanner_admin_database_v1.types import backup
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.spanner.admin.database.v1",
+ manifest={
+ "BackupScheduleSpec",
+ "BackupSchedule",
+ "CrontabSpec",
+ "CreateBackupScheduleRequest",
+ "GetBackupScheduleRequest",
+ "DeleteBackupScheduleRequest",
+ "ListBackupSchedulesRequest",
+ "ListBackupSchedulesResponse",
+ "UpdateBackupScheduleRequest",
+ },
+)
+
+
+class BackupScheduleSpec(proto.Message):
+ r"""Defines specifications of the backup schedule.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ cron_spec (google.cloud.spanner_admin_database_v1.types.CrontabSpec):
+ Cron style schedule specification.
+
+ This field is a member of `oneof`_ ``schedule_spec``.
+ """
+
+ cron_spec: "CrontabSpec" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="schedule_spec",
+ message="CrontabSpec",
+ )
+
+
+class BackupSchedule(proto.Message):
+ r"""BackupSchedule expresses the automated backup creation
+ specification for a Spanner database.
+ Next ID: 10
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Identifier. Output only for the
+            [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule]
+ operation. Required for the
+ [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
+ operation. A globally unique identifier for the backup
+ schedule which cannot be changed. Values are of the form
+            ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]``
+ The final segment of the name must be between 2 and 60
+ characters in length.
+ spec (google.cloud.spanner_admin_database_v1.types.BackupScheduleSpec):
+ Optional. The schedule specification based on
+ which the backup creations are triggered.
+ retention_duration (google.protobuf.duration_pb2.Duration):
+ Optional. The retention duration of a backup
+ that must be at least 6 hours and at most 366
+ days. The backup is eligible to be automatically
+ deleted once the retention period has elapsed.
+ encryption_config (google.cloud.spanner_admin_database_v1.types.CreateBackupEncryptionConfig):
+ Optional. The encryption configuration that
+ will be used to encrypt the backup. If this
+ field is not specified, the backup will use the
+ same encryption configuration as the database.
+ full_backup_spec (google.cloud.spanner_admin_database_v1.types.FullBackupSpec):
+ The schedule creates only full backups.
+
+ This field is a member of `oneof`_ ``backup_type_spec``.
+ incremental_backup_spec (google.cloud.spanner_admin_database_v1.types.IncrementalBackupSpec):
+ The schedule creates incremental backup
+ chains.
+
+ This field is a member of `oneof`_ ``backup_type_spec``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The timestamp at which the
+ schedule was last updated. If the schedule has
+ never been updated, this field contains the
+ timestamp when the schedule was first created.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ spec: "BackupScheduleSpec" = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message="BackupScheduleSpec",
+ )
+ retention_duration: duration_pb2.Duration = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=duration_pb2.Duration,
+ )
+ encryption_config: backup.CreateBackupEncryptionConfig = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=backup.CreateBackupEncryptionConfig,
+ )
+ full_backup_spec: backup.FullBackupSpec = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="backup_type_spec",
+ message=backup.FullBackupSpec,
+ )
+ incremental_backup_spec: backup.IncrementalBackupSpec = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ oneof="backup_type_spec",
+ message=backup.IncrementalBackupSpec,
+ )
+ update_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class CrontabSpec(proto.Message):
+ r"""CrontabSpec can be used to specify the version time and
+ frequency at which the backup should be created.
+
+ Attributes:
+ text (str):
+ Required. Textual representation of the crontab. User can
+ customize the backup frequency and the backup version time
+ using the cron expression. The version time must be in UTC
+ timezone.
+
+ The backup will contain an externally consistent copy of the
+ database at the version time. Allowed frequencies are 12
+ hour, 1 day, 1 week and 1 month. Examples of valid cron
+ specifications:
+
+ - ``0 2/12 * * *`` : every 12 hours at (2, 14) hours past
+ midnight in UTC.
+ - ``0 2,14 * * *`` : every 12 hours at (2,14) hours past
+ midnight in UTC.
+ - ``0 2 * * *`` : once a day at 2 past midnight in UTC.
+ - ``0 2 * * 0`` : once a week every Sunday at 2 past
+ midnight in UTC.
+ - ``0 2 8 * *`` : once a month on 8th day at 2 past midnight
+ in UTC.
+ time_zone (str):
+ Output only. The time zone of the times in
+ ``CrontabSpec.text``. Currently only UTC is supported.
+ creation_window (google.protobuf.duration_pb2.Duration):
+ Output only. Schedule backups will contain an externally
+ consistent copy of the database at the version time
+ specified in ``schedule_spec.cron_spec``. However, Spanner
+ may not initiate the creation of the scheduled backups at
+ that version time. Spanner will initiate the creation of
+ scheduled backups within the time window bounded by the
+ version_time specified in ``schedule_spec.cron_spec`` and
+ version_time + ``creation_window``.
+ """
+
+ text: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ time_zone: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ creation_window: duration_pb2.Duration = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=duration_pb2.Duration,
+ )
+
+
+class CreateBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+
+ Attributes:
+ parent (str):
+ Required. The name of the database that this
+ backup schedule applies to.
+ backup_schedule_id (str):
+ Required. The Id to use for the backup schedule. The
+ ``backup_schedule_id`` appended to ``parent`` forms the full
+ backup schedule name of the form
+            ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to create.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_schedule_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ backup_schedule: "BackupSchedule" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message="BackupSchedule",
+ )
+
+
+class GetBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+
+ Attributes:
+ name (str):
+ Required. The name of the schedule to retrieve. Values are
+ of the form
+            ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class DeleteBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+
+ Attributes:
+ name (str):
+ Required. The name of the schedule to delete. Values are of
+ the form
+            ``projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>``.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListBackupSchedulesRequest(proto.Message):
+ r"""The request for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Attributes:
+ parent (str):
+ Required. Database is the parent resource
+ whose backup schedules should be listed. Values
+ are of the form
+            projects/<project>/instances/<instance>/databases/<database>
+ page_size (int):
+ Optional. Number of backup schedules to be
+ returned in the response. If 0 or less, defaults
+ to the server's maximum allowed page size.
+ page_token (str):
+ Optional. If non-empty, ``page_token`` should contain a
+ [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
+ from a previous
+ [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
+ to the same ``parent``.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+
+
+class ListBackupSchedulesResponse(proto.Message):
+ r"""The response for
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+
+ Attributes:
+ backup_schedules (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupSchedule]):
+ The list of backup schedules for a database.
+ next_page_token (str):
+ ``next_page_token`` can be sent in a subsequent
+ [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
+ call to fetch more of the schedules.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ backup_schedules: MutableSequence["BackupSchedule"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message="BackupSchedule",
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class UpdateBackupScheduleRequest(proto.Message):
+ r"""The request for
+ [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+
+ Attributes:
+ backup_schedule (google.cloud.spanner_admin_database_v1.types.BackupSchedule):
+ Required. The backup schedule to update.
+ ``backup_schedule.name``, and the fields to be updated as
+ specified by ``update_mask`` are required. Other fields are
+ ignored.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which fields in
+ the BackupSchedule resource should be updated.
+ This mask is relative to the BackupSchedule
+ resource, not to the request message. The field
+ mask must always be specified; this prevents any
+ future fields from being erased accidentally.
+ """
+
+ backup_schedule: "BackupSchedule" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="BackupSchedule",
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_database_v1/types/common.py b/google/cloud/spanner_admin_database_v1/types/common.py
index 6475e588bc..fff1a8756c 100644
--- a/google/cloud/spanner_admin_database_v1/types/common.py
+++ b/google/cloud/spanner_admin_database_v1/types/common.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
@@ -31,7 +35,17 @@
class DatabaseDialect(proto.Enum):
- r"""Indicates the dialect type of a database."""
+ r"""Indicates the dialect type of a database.
+
+ Values:
+ DATABASE_DIALECT_UNSPECIFIED (0):
+ Default value. This value will create a database with the
+ GOOGLE_STANDARD_SQL dialect.
+ GOOGLE_STANDARD_SQL (1):
+ GoogleSQL supported SQL.
+ POSTGRESQL (2):
+ PostgreSQL supported SQL.
+ """
DATABASE_DIALECT_UNSPECIFIED = 0
GOOGLE_STANDARD_SQL = 1
POSTGRESQL = 2
@@ -52,16 +66,16 @@ class OperationProgress(proto.Message):
failed or was completed successfully.
"""
- progress_percent = proto.Field(
+ progress_percent: int = proto.Field(
proto.INT32,
number=1,
)
- start_time = proto.Field(
+ start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
- end_time = proto.Field(
+ end_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
@@ -76,12 +90,36 @@ class EncryptionConfig(proto.Message):
The Cloud KMS key to be used for encrypting and decrypting
the database. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Specifies the KMS configuration for the one or more keys
+ used to encrypt the database. Values are of the form
+ ``projects//locations//keyRings//cryptoKeys/``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the database instance configuration. Some
+ examples:
+
+ - For single region database instance configs, specify a
+ single regional location KMS key.
+ - For multi-regional database instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For a database instance config of type USER_MANAGED,
+ please specify only regional location KMS keys to cover
+ each region in the instance config. Multi-regional
+ location KMS keys are not supported for USER_MANAGED
+ instance configs.
"""
- kms_key_name = proto.Field(
+ kms_key_name: str = proto.Field(
proto.STRING,
number=2,
)
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class EncryptionInfo(proto.Message):
@@ -102,22 +140,38 @@ class EncryptionInfo(proto.Message):
"""
class Type(proto.Enum):
- r"""Possible encryption types."""
+ r"""Possible encryption types.
+
+ Values:
+ TYPE_UNSPECIFIED (0):
+ Encryption type was not specified, though
+ data at rest remains encrypted.
+ GOOGLE_DEFAULT_ENCRYPTION (1):
+ The data is encrypted at rest with a key that
+ is fully managed by Google. No key version or
+ status will be populated. This is the default
+ state.
+ CUSTOMER_MANAGED_ENCRYPTION (2):
+ The data is encrypted at rest with a key that is managed by
+                the customer. The active version of the key,
+                ``kms_key_version``, will be populated, and
+ ``encryption_status`` may be populated.
+ """
TYPE_UNSPECIFIED = 0
GOOGLE_DEFAULT_ENCRYPTION = 1
CUSTOMER_MANAGED_ENCRYPTION = 2
- encryption_type = proto.Field(
+ encryption_type: Type = proto.Field(
proto.ENUM,
number=3,
enum=Type,
)
- encryption_status = proto.Field(
+ encryption_status: status_pb2.Status = proto.Field(
proto.MESSAGE,
number=4,
message=status_pb2.Status,
)
- kms_key_version = proto.Field(
+ kms_key_version: str = proto.Field(
proto.STRING,
number=2,
)
diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
index 17685ac754..c82fdc87df 100644
--- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
+++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,12 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
import proto # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
from google.cloud.spanner_admin_database_v1.types import common
from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
@@ -32,7 +39,10 @@
"CreateDatabaseRequest",
"CreateDatabaseMetadata",
"GetDatabaseRequest",
+ "UpdateDatabaseRequest",
+ "UpdateDatabaseMetadata",
"UpdateDatabaseDdlRequest",
+ "DdlStatementActionInfo",
"UpdateDatabaseDdlMetadata",
"DropDatabaseRequest",
"GetDatabaseDdlRequest",
@@ -46,12 +56,25 @@
"DatabaseRole",
"ListDatabaseRolesRequest",
"ListDatabaseRolesResponse",
+ "AddSplitPointsRequest",
+ "AddSplitPointsResponse",
+ "SplitPoints",
+ "InternalUpdateGraphOperationRequest",
+ "InternalUpdateGraphOperationResponse",
},
)
class RestoreSourceType(proto.Enum):
- r"""Indicates the type of the restore source."""
+ r"""Indicates the type of the restore source.
+
+ Values:
+ TYPE_UNSPECIFIED (0):
+ No restore associated.
+ BACKUP (1):
+ A backup was used as the source of the
+ restore.
+ """
TYPE_UNSPECIFIED = 0
BACKUP = 1
@@ -71,12 +94,12 @@ class RestoreInfo(proto.Message):
This field is a member of `oneof`_ ``source_info``.
"""
- source_type = proto.Field(
+ source_type: "RestoreSourceType" = proto.Field(
proto.ENUM,
number=1,
enum="RestoreSourceType",
)
- backup_info = proto.Field(
+ backup_info: gsad_backup.BackupInfo = proto.Field(
proto.MESSAGE,
number=2,
oneof="source_info",
@@ -109,19 +132,20 @@ class Database(proto.Message):
the encryption configuration for the database.
For databases that are using Google default or
other types of encryption, this field is empty.
- encryption_info (Sequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
- Output only. For databases that are using
- customer managed encryption, this field contains
- the encryption information for the database,
- such as encryption state and the Cloud KMS key
- versions that are in use.
- For databases that are using Google default or
- other types of encryption, this field is empty.
-
- This field is propagated lazily from the
- backend. There might be a delay from when a key
- version is being used and when it appears in
- this field.
+ encryption_info (MutableSequence[google.cloud.spanner_admin_database_v1.types.EncryptionInfo]):
+ Output only. For databases that are using customer managed
+ encryption, this field contains the encryption information
+ for the database, such as all Cloud KMS key versions that
+            are in use. The ``encryption_status`` field
+            inside of each ``EncryptionInfo``
+            is not populated.
+
+ For databases that are using Google default or other types
+ of encryption, this field is empty.
+
+ This field is propagated lazily from the backend. There
+ might be a delay from when a key version is being used and
+ when it appears in this field.
version_retention_period (str):
Output only. The period in which Cloud Spanner retains all
versions of data for the database. This is the same as the
@@ -148,62 +172,101 @@ class Database(proto.Message):
database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
Output only. The dialect of the Cloud Spanner
Database.
+ enable_drop_protection (bool):
+ Whether drop protection is enabled for this database.
+ Defaults to false, if not set. For more details, please see
+            how to `prevent accidental database deletion
+            <https://cloud.google.com/spanner/docs/prevent-database-deletion>`__.
+ reconciling (bool):
+ Output only. If true, the database is being
+ updated. If false, there are no ongoing update
+ operations for the database.
"""
class State(proto.Enum):
- r"""Indicates the current state of the database."""
+ r"""Indicates the current state of the database.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The database is still being created. Operations on the
+ database may fail with ``FAILED_PRECONDITION`` in this
+ state.
+ READY (2):
+ The database is fully created and ready for
+ use.
+ READY_OPTIMIZING (3):
+ The database is fully created and ready for use, but is
+ still being optimized for performance and cannot handle full
+ load.
+
+ In this state, the database still references the backup it
+                was restored from, preventing the backup from being deleted.
+ When optimizations are complete, the full performance of the
+ database will be restored, and the database will transition
+ to ``READY`` state.
+ """
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
READY_OPTIMIZING = 3
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- state = proto.Field(
+ state: State = proto.Field(
proto.ENUM,
number=2,
enum=State,
)
- create_time = proto.Field(
+ create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- restore_info = proto.Field(
+ restore_info: "RestoreInfo" = proto.Field(
proto.MESSAGE,
number=4,
message="RestoreInfo",
)
- encryption_config = proto.Field(
+ encryption_config: common.EncryptionConfig = proto.Field(
proto.MESSAGE,
number=5,
message=common.EncryptionConfig,
)
- encryption_info = proto.RepeatedField(
+ encryption_info: MutableSequence[common.EncryptionInfo] = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=common.EncryptionInfo,
)
- version_retention_period = proto.Field(
+ version_retention_period: str = proto.Field(
proto.STRING,
number=6,
)
- earliest_version_time = proto.Field(
+ earliest_version_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
- default_leader = proto.Field(
+ default_leader: str = proto.Field(
proto.STRING,
number=9,
)
- database_dialect = proto.Field(
+ database_dialect: common.DatabaseDialect = proto.Field(
proto.ENUM,
number=10,
enum=common.DatabaseDialect,
)
+ enable_drop_protection: bool = proto.Field(
+ proto.BOOL,
+ number=11,
+ )
+ reconciling: bool = proto.Field(
+ proto.BOOL,
+ number=12,
+ )
class ListDatabasesRequest(proto.Message):
@@ -226,15 +289,15 @@ class ListDatabasesRequest(proto.Message):
[ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- page_size = proto.Field(
+ page_size: int = proto.Field(
proto.INT32,
number=3,
)
- page_token = proto.Field(
+ page_token: str = proto.Field(
proto.STRING,
number=4,
)
@@ -245,7 +308,7 @@ class ListDatabasesResponse(proto.Message):
[ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
Attributes:
- databases (Sequence[google.cloud.spanner_admin_database_v1.types.Database]):
+ databases (MutableSequence[google.cloud.spanner_admin_database_v1.types.Database]):
Databases that matched the request.
next_page_token (str):
``next_page_token`` can be sent in a subsequent
@@ -257,12 +320,12 @@ class ListDatabasesResponse(proto.Message):
def raw_page(self):
return self
- databases = proto.RepeatedField(
+ databases: MutableSequence["Database"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Database",
)
- next_page_token = proto.Field(
+ next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
@@ -284,13 +347,15 @@ class CreateDatabaseRequest(proto.Message):
between 2 and 30 characters in length. If the database ID is
a reserved word or if it contains a hyphen, the database ID
must be enclosed in backticks (:literal:`\``).
- extra_statements (Sequence[str]):
+ extra_statements (MutableSequence[str]):
Optional. A list of DDL statements to run
inside the newly created database. Statements
can create tables, indexes, etc. These
statements execute atomically with the creation
- of the database: if there is an error in any
- statement, the database is not created.
+ of the database:
+
+ if there is an error in any statement, the
+ database is not created.
encryption_config (google.cloud.spanner_admin_database_v1.types.EncryptionConfig):
Optional. The encryption configuration for
the database. If this field is not specified,
@@ -299,30 +364,54 @@ class CreateDatabaseRequest(proto.Message):
database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect):
Optional. The dialect of the Cloud Spanner
Database.
+ proto_descriptors (bytes):
+ Optional. Proto descriptors used by CREATE/ALTER PROTO
+ BUNDLE statements in 'extra_statements' above. Contains a
+ protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ To generate it,
+ `install `__ and
+ run ``protoc`` with --include_imports and
+ --descriptor_set_out. For example, to generate for
+ moon/shot/app.proto, run
+
+ ::
+
+ $protoc --proto_path=/app_path --proto_path=/lib_path \
+ --include_imports \
+ --descriptor_set_out=descriptors.data \
+ moon/shot/app.proto
+
+ For more details, see protobuffer `self
+ description `__.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- create_statement = proto.Field(
+ create_statement: str = proto.Field(
proto.STRING,
number=2,
)
- extra_statements = proto.RepeatedField(
+ extra_statements: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
- encryption_config = proto.Field(
+ encryption_config: common.EncryptionConfig = proto.Field(
proto.MESSAGE,
number=4,
message=common.EncryptionConfig,
)
- database_dialect = proto.Field(
+ database_dialect: common.DatabaseDialect = proto.Field(
proto.ENUM,
number=5,
enum=common.DatabaseDialect,
)
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=6,
+ )
class CreateDatabaseMetadata(proto.Message):
@@ -334,7 +423,7 @@ class CreateDatabaseMetadata(proto.Message):
The database being created.
"""
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=1,
)
@@ -351,12 +440,74 @@ class GetDatabaseRequest(proto.Message):
``projects//instances//databases/``.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
+class UpdateDatabaseRequest(proto.Message):
+ r"""The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+
+ Attributes:
+ database (google.cloud.spanner_admin_database_v1.types.Database):
+ Required. The database to update. The ``name`` field of the
+ database is of the form
+ ``projects//instances//databases/``.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The list of fields to update. Currently, only
+ ``enable_drop_protection`` field can be updated.
+ """
+
+ database: "Database" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Database",
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+class UpdateDatabaseMetadata(proto.Message):
+ r"""Metadata type for the operation returned by
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+
+ Attributes:
+ request (google.cloud.spanner_admin_database_v1.types.UpdateDatabaseRequest):
+ The request for
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
+ progress (google.cloud.spanner_admin_database_v1.types.OperationProgress):
+ The progress of the
+ [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
+ operation.
+ cancel_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which this operation was
+ cancelled. If set, this operation is in the
+ process of undoing itself (which is
+ best-effort).
+ """
+
+ request: "UpdateDatabaseRequest" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="UpdateDatabaseRequest",
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=common.OperationProgress,
+ )
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
class UpdateDatabaseDdlRequest(proto.Message):
r"""Enqueues the given DDL statements to be applied, in order but not
necessarily all at once, to the database schema at some point (or
@@ -378,7 +529,7 @@ class UpdateDatabaseDdlRequest(proto.Message):
Attributes:
database (str):
Required. The database to update.
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
Required. DDL statements to be applied to the
database.
operation_id (str):
@@ -403,17 +554,88 @@ class UpdateDatabaseDdlRequest(proto.Message):
underscore. If the named operation already exists,
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
returns ``ALREADY_EXISTS``.
+ proto_descriptors (bytes):
+ Optional. Proto descriptors used by CREATE/ALTER PROTO
+ BUNDLE statements. Contains a protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ To generate it,
+ `install `__ and
+ run ``protoc`` with --include_imports and
+ --descriptor_set_out. For example, to generate for
+ moon/shot/app.proto, run
+
+ ::
+
+ $protoc --proto_path=/app_path --proto_path=/lib_path \
+ --include_imports \
+ --descriptor_set_out=descriptors.data \
+ moon/shot/app.proto
+
+ For more details, see protobuffer `self
+ description `__.
+ throughput_mode (bool):
+ Optional. This field is exposed to be used by the Spanner
+ Migration Tool. For more details, see
+ `SMT `__.
+ """
+
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ statements: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=2,
+ )
+ operation_id: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=4,
+ )
+ throughput_mode: bool = proto.Field(
+ proto.BOOL,
+ number=5,
+ )
+
+
+class DdlStatementActionInfo(proto.Message):
+ r"""Action information extracted from a DDL statement. This proto is
+ used to display the brief info of the DDL statement for the
+ operation
+ [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+
+ Attributes:
+ action (str):
+ The action for the DDL statement, e.g.
+ CREATE, ALTER, DROP, GRANT, etc. This field is a
+ non-empty string.
+ entity_type (str):
+ The entity type for the DDL statement, e.g. TABLE, INDEX,
+ VIEW, etc. This field can be empty string for some DDL
+ statement, e.g. for statement "ANALYZE", ``entity_type`` =
+ "".
+ entity_names (MutableSequence[str]):
+ The entity name(s) being operated on the DDL statement. E.g.
+
+ 1. For statement "CREATE TABLE t1(...)", ``entity_names`` =
+ ["t1"].
+ 2. For statement "GRANT ROLE r1, r2 ...", ``entity_names`` =
+ ["r1", "r2"].
+ 3. For statement "ANALYZE", ``entity_names`` = [].
"""
- database = proto.Field(
+ action: str = proto.Field(
proto.STRING,
number=1,
)
- statements = proto.RepeatedField(
+ entity_type: str = proto.Field(
proto.STRING,
number=2,
)
- operation_id = proto.Field(
+ entity_names: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
@@ -426,54 +648,61 @@ class UpdateDatabaseDdlMetadata(proto.Message):
Attributes:
database (str):
The database being modified.
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
For an update this list contains all the
statements. For an individual statement, this
list contains only that statement.
- commit_timestamps (Sequence[google.protobuf.timestamp_pb2.Timestamp]):
+ commit_timestamps (MutableSequence[google.protobuf.timestamp_pb2.Timestamp]):
Reports the commit timestamps of all statements that have
succeeded so far, where ``commit_timestamps[i]`` is the
commit timestamp for the statement ``statements[i]``.
throttled (bool):
Output only. When true, indicates that the
- operation is throttled e.g due to resource
+ operation is throttled e.g. due to resource
constraints. When resources become available the
operation will resume and this field will be
false again.
- progress (Sequence[google.cloud.spanner_admin_database_v1.types.OperationProgress]):
+ progress (MutableSequence[google.cloud.spanner_admin_database_v1.types.OperationProgress]):
The progress of the
[UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- operations. Currently, only index creation statements will
- have a continuously updating progress. For non-index
- creation statements, ``progress[i]`` will have start time
- and end time populated with commit timestamp of operation,
- as well as a progress of 100% once the operation has
- completed. ``progress[i]`` is the operation progress for
- ``statements[i]``.
+ operations. All DDL statements will have continuously
+ updating progress, and ``progress[i]`` is the operation
+ progress for ``statements[i]``. Also, ``progress[i]`` will
+ have start time and end time populated with commit timestamp
+ of operation, as well as a progress of 100% once the
+ operation has completed.
+ actions (MutableSequence[google.cloud.spanner_admin_database_v1.types.DdlStatementActionInfo]):
+ The brief action info for the DDL statements. ``actions[i]``
+ is the brief info for ``statements[i]``.
"""
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=1,
)
- statements = proto.RepeatedField(
+ statements: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
- commit_timestamps = proto.RepeatedField(
+ commit_timestamps: MutableSequence[timestamp_pb2.Timestamp] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
- throttled = proto.Field(
+ throttled: bool = proto.Field(
proto.BOOL,
number=4,
)
- progress = proto.RepeatedField(
+ progress: MutableSequence[common.OperationProgress] = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=common.OperationProgress,
)
+ actions: MutableSequence["DdlStatementActionInfo"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=6,
+ message="DdlStatementActionInfo",
+ )
class DropDatabaseRequest(proto.Message):
@@ -485,7 +714,7 @@ class DropDatabaseRequest(proto.Message):
Required. The database to be dropped.
"""
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=1,
)
@@ -502,7 +731,7 @@ class GetDatabaseDdlRequest(proto.Message):
``projects//instances//databases/``
"""
- database = proto.Field(
+ database: str = proto.Field(
proto.STRING,
number=1,
)
@@ -513,16 +742,26 @@ class GetDatabaseDdlResponse(proto.Message):
[GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
Attributes:
- statements (Sequence[str]):
+ statements (MutableSequence[str]):
A list of formatted DDL statements defining
the schema of the database specified in the
request.
+ proto_descriptors (bytes):
+ Proto descriptors stored in the database. Contains a
+ protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__.
+ For more details, see protobuffer `self
+ description `__.
"""
- statements = proto.RepeatedField(
+ statements: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=2,
+ )
class ListDatabaseOperationsRequest(proto.Message):
@@ -547,21 +786,21 @@ class ListDatabaseOperationsRequest(proto.Message):
[Operation][google.longrunning.Operation] are eligible for
filtering:
- - ``name`` - The name of the long-running operation
- - ``done`` - False if the operation is in progress, else
- true.
- - ``metadata.@type`` - the type of metadata. For example,
- the type string for
- [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
- is
- ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``.
- - ``metadata.`` - any field in metadata.value.
- ``metadata.@type`` must be specified first, if filtering
- on metadata fields.
- - ``error`` - Error associated with the long-running
- operation.
- - ``response.@type`` - the type of response.
- - ``response.`` - any field in response.value.
+ - ``name`` - The name of the long-running operation
+ - ``done`` - False if the operation is in progress, else
+ true.
+ - ``metadata.@type`` - the type of metadata. For example,
+ the type string for
+ [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
+ is
+ ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``.
+ - ``metadata.`` - any field in metadata.value.
+ ``metadata.@type`` must be specified first, if filtering
+ on metadata fields.
+ - ``error`` - Error associated with the long-running
+ operation.
+ - ``response.@type`` - the type of response.
+ - ``response.`` - any field in response.value.
You can combine multiple expressions by enclosing each
expression in parentheses. By default, expressions are
@@ -570,21 +809,21 @@ class ListDatabaseOperationsRequest(proto.Message):
Here are a few examples:
- - ``done:true`` - The operation is complete.
- - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND``
- ``(metadata.source_type:BACKUP) AND``
- ``(metadata.backup_info.backup:backup_howl) AND``
- ``(metadata.name:restored_howl) AND``
- ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
- ``(error:*)`` - Return operations where:
-
- - The operation's metadata type is
- [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- - The database is restored from a backup.
- - The backup name contains "backup_howl".
- - The restored database's name contains "restored_howl".
- - The operation started before 2018-03-28T14:50:00Z.
- - The operation resulted in an error.
+ - ``done:true`` - The operation is complete.
+ - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND``
+ ``(metadata.source_type:BACKUP) AND``
+ ``(metadata.backup_info.backup:backup_howl) AND``
+ ``(metadata.name:restored_howl) AND``
+ ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND``
+ ``(error:*)`` - Return operations where:
+
+ - The operation's metadata type is
+ [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ - The database is restored from a backup.
+ - The backup name contains "backup_howl".
+ - The restored database's name contains "restored_howl".
+ - The operation started before 2018-03-28T14:50:00Z.
+ - The operation resulted in an error.
page_size (int):
Number of operations to be returned in the
response. If 0 or less, defaults to the server's
@@ -597,19 +836,19 @@ class ListDatabaseOperationsRequest(proto.Message):
to the same ``parent`` and with the same ``filter``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- filter = proto.Field(
+ filter: str = proto.Field(
proto.STRING,
number=2,
)
- page_size = proto.Field(
+ page_size: int = proto.Field(
proto.INT32,
number=3,
)
- page_token = proto.Field(
+ page_token: str = proto.Field(
proto.STRING,
number=4,
)
@@ -620,7 +859,7 @@ class ListDatabaseOperationsResponse(proto.Message):
[ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
Attributes:
- operations (Sequence[google.longrunning.operations_pb2.Operation]):
+ operations (MutableSequence[google.longrunning.operations_pb2.Operation]):
The list of matching database [long-running
operations][google.longrunning.Operation]. Each operation's
name will be prefixed by the database's name. The
@@ -637,12 +876,12 @@ class ListDatabaseOperationsResponse(proto.Message):
def raw_page(self):
return self
- operations = proto.RepeatedField(
+ operations: MutableSequence[operations_pb2.Operation] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=operations_pb2.Operation,
)
- next_page_token = proto.Field(
+ next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
@@ -684,20 +923,20 @@ class RestoreDatabaseRequest(proto.Message):
= ``USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION``.
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- database_id = proto.Field(
+ database_id: str = proto.Field(
proto.STRING,
number=2,
)
- backup = proto.Field(
+ backup: str = proto.Field(
proto.STRING,
number=3,
oneof="source",
)
- encryption_config = proto.Field(
+ encryption_config: "RestoreDatabaseEncryptionConfig" = proto.Field(
proto.MESSAGE,
number=4,
message="RestoreDatabaseEncryptionConfig",
@@ -718,24 +957,63 @@ class RestoreDatabaseEncryptionConfig(proto.Message):
[encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
is ``CUSTOMER_MANAGED_ENCRYPTION``. Values are of the form
``projects//locations//keyRings//cryptoKeys/``.
+ kms_key_names (MutableSequence[str]):
+ Optional. Specifies the KMS configuration for the one or
+ more keys used to encrypt the database. Values are of the
+ form
+ ``projects//locations//keyRings//cryptoKeys/``.
+
+ The keys referenced by kms_key_names must fully cover all
+ regions of the database instance configuration. Some
+ examples:
+
+ - For single region database instance configs, specify a
+ single regional location KMS key.
+ - For multi-regional database instance configs of type
+ GOOGLE_MANAGED, either specify a multi-regional location
+ KMS key or multiple regional location KMS keys that cover
+ all regions in the instance config.
+ - For a database instance config of type USER_MANAGED,
+ please specify only regional location KMS keys to cover
+ each region in the instance config. Multi-regional
+ location KMS keys are not supported for USER_MANAGED
+ instance configs.
"""
class EncryptionType(proto.Enum):
- r"""Encryption types for the database to be restored."""
+ r"""Encryption types for the database to be restored.
+
+ Values:
+ ENCRYPTION_TYPE_UNSPECIFIED (0):
+ Unspecified. Do not use.
+ USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION (1):
+ This is the default option when
+ [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
+ is not specified.
+ GOOGLE_DEFAULT_ENCRYPTION (2):
+ Use Google default encryption.
+ CUSTOMER_MANAGED_ENCRYPTION (3):
+ Use customer managed encryption. If specified,
+ ``kms_key_name`` must contain a valid Cloud KMS key.
+ """
ENCRYPTION_TYPE_UNSPECIFIED = 0
USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1
GOOGLE_DEFAULT_ENCRYPTION = 2
CUSTOMER_MANAGED_ENCRYPTION = 3
- encryption_type = proto.Field(
+ encryption_type: EncryptionType = proto.Field(
proto.ENUM,
number=1,
enum=EncryptionType,
)
- kms_key_name = proto.Field(
+ kms_key_name: str = proto.Field(
proto.STRING,
number=2,
)
+ kms_key_names: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=3,
+ )
class RestoreDatabaseMetadata(proto.Message):
@@ -791,32 +1069,32 @@ class RestoreDatabaseMetadata(proto.Message):
if the restore was not successful.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- source_type = proto.Field(
+ source_type: "RestoreSourceType" = proto.Field(
proto.ENUM,
number=2,
enum="RestoreSourceType",
)
- backup_info = proto.Field(
+ backup_info: gsad_backup.BackupInfo = proto.Field(
proto.MESSAGE,
number=3,
oneof="source_info",
message=gsad_backup.BackupInfo,
)
- progress = proto.Field(
+ progress: common.OperationProgress = proto.Field(
proto.MESSAGE,
number=4,
message=common.OperationProgress,
)
- cancel_time = proto.Field(
+ cancel_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
- optimize_database_operation_name = proto.Field(
+ optimize_database_operation_name: str = proto.Field(
proto.STRING,
number=6,
)
@@ -838,11 +1116,11 @@ class OptimizeRestoredDatabaseMetadata(proto.Message):
optimizations.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
- progress = proto.Field(
+ progress: common.OperationProgress = proto.Field(
proto.MESSAGE,
number=2,
message=common.OperationProgress,
@@ -856,13 +1134,12 @@ class DatabaseRole(proto.Message):
name (str):
Required. The name of the database role. Values are of the
form
- ``projects//instances//databases//databaseRoles/ {role}``,
+ ``projects//instances//databases//databaseRoles/``
where ```` is as specified in the ``CREATE ROLE`` DDL
- statement. This name can be passed to Get/Set IAMPolicy
- methods to identify the database role.
+ statement.
"""
- name = proto.Field(
+ name: str = proto.Field(
proto.STRING,
number=1,
)
@@ -876,7 +1153,7 @@ class ListDatabaseRolesRequest(proto.Message):
parent (str):
Required. The database whose roles should be listed. Values
are of the form
- ``projects//instances//databases//databaseRoles``.
+ ``projects//instances//databases/``.
page_size (int):
Number of database roles to be returned in
the response. If 0 or less, defaults to the
@@ -888,15 +1165,15 @@ class ListDatabaseRolesRequest(proto.Message):
[ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
"""
- parent = proto.Field(
+ parent: str = proto.Field(
proto.STRING,
number=1,
)
- page_size = proto.Field(
+ page_size: int = proto.Field(
proto.INT32,
number=2,
)
- page_token = proto.Field(
+ page_token: str = proto.Field(
proto.STRING,
number=3,
)
@@ -907,7 +1184,7 @@ class ListDatabaseRolesResponse(proto.Message):
[ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
Attributes:
- database_roles (Sequence[google.cloud.spanner_admin_database_v1.types.DatabaseRole]):
+ database_roles (MutableSequence[google.cloud.spanner_admin_database_v1.types.DatabaseRole]):
Database roles that matched the request.
next_page_token (str):
``next_page_token`` can be sent in a subsequent
@@ -919,15 +1196,154 @@ class ListDatabaseRolesResponse(proto.Message):
def raw_page(self):
return self
- database_roles = proto.RepeatedField(
+ database_roles: MutableSequence["DatabaseRole"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="DatabaseRole",
)
- next_page_token = proto.Field(
+ next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
+class AddSplitPointsRequest(proto.Message):
+ r"""The request for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ Attributes:
+ database (str):
+ Required. The database on whose tables/indexes split points
+ are to be added. Values are of the form
+ ``projects//instances//databases/``.
+ split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]):
+ Required. The split points to add.
+ initiator (str):
+ Optional. A user-supplied tag associated with the split
+ points. For example, "initial_data_load", "special_event_1".
+ Defaults to "CloudAddSplitPointsAPI" if not specified. The
+ length of the tag must not exceed 50 characters, else it will be
+ trimmed. Only valid UTF8 characters are allowed.
+ """
+
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ split_points: MutableSequence["SplitPoints"] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=2,
+ message="SplitPoints",
+ )
+ initiator: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class AddSplitPointsResponse(proto.Message):
+ r"""The response for
+ [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+
+ """
+
+
+class SplitPoints(proto.Message):
+ r"""The split points of a table/index.
+
+ Attributes:
+ table (str):
+ The table to split.
+ index (str):
+ The index to split. If specified, the ``table`` field must
+ refer to the index's base table.
+ keys (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints.Key]):
+ Required. The list of split keys, i.e., the
+ split boundaries.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Optional. The expiration timestamp of the
+ split points. A timestamp in the past means
+ immediate expiration. The maximum value can be
+ 30 days in the future. Defaults to 10 days in
+ the future if not specified.
+ """
+
+ class Key(proto.Message):
+ r"""A split key.
+
+ Attributes:
+ key_parts (google.protobuf.struct_pb2.ListValue):
+ Required. The column values making up the
+ split key.
+ """
+
+ key_parts: struct_pb2.ListValue = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=struct_pb2.ListValue,
+ )
+
+ table: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ index: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ keys: MutableSequence[Key] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message=Key,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class InternalUpdateGraphOperationRequest(proto.Message):
+ r"""Internal request proto, do not use directly.
+
+ Attributes:
+ database (str):
+ Internal field, do not use directly.
+ operation_id (str):
+ Internal field, do not use directly.
+ vm_identity_token (str):
+ Internal field, do not use directly.
+ progress (float):
+ Internal field, do not use directly.
+ status (google.rpc.status_pb2.Status):
+ Internal field, do not use directly.
+ """
+
+ database: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ operation_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ vm_identity_token: str = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ progress: float = proto.Field(
+ proto.DOUBLE,
+ number=3,
+ )
+ status: status_pb2.Status = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=status_pb2.Status,
+ )
+
+
+class InternalUpdateGraphOperationResponse(proto.Message):
+ r"""Internal response proto, do not use directly."""
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py
index c641cd061c..261949561f 100644
--- a/google/cloud/spanner_admin_instance_v1/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,40 +13,202 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from google.cloud.spanner_admin_instance_v1 import gapic_version as package_version
+
+import google.api_core as api_core
+import sys
+
+__version__ = package_version.__version__
+
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.instance_admin import InstanceAdminClient
from .services.instance_admin import InstanceAdminAsyncClient
+from .types.common import OperationProgress
+from .types.common import ReplicaSelection
+from .types.common import FulfillmentPeriod
+from .types.spanner_instance_admin import AutoscalingConfig
+from .types.spanner_instance_admin import CreateInstanceConfigMetadata
+from .types.spanner_instance_admin import CreateInstanceConfigRequest
from .types.spanner_instance_admin import CreateInstanceMetadata
+from .types.spanner_instance_admin import CreateInstancePartitionMetadata
+from .types.spanner_instance_admin import CreateInstancePartitionRequest
from .types.spanner_instance_admin import CreateInstanceRequest
+from .types.spanner_instance_admin import DeleteInstanceConfigRequest
+from .types.spanner_instance_admin import DeleteInstancePartitionRequest
from .types.spanner_instance_admin import DeleteInstanceRequest
+from .types.spanner_instance_admin import FreeInstanceMetadata
from .types.spanner_instance_admin import GetInstanceConfigRequest
+from .types.spanner_instance_admin import GetInstancePartitionRequest
from .types.spanner_instance_admin import GetInstanceRequest
from .types.spanner_instance_admin import Instance
from .types.spanner_instance_admin import InstanceConfig
+from .types.spanner_instance_admin import InstancePartition
+from .types.spanner_instance_admin import ListInstanceConfigOperationsRequest
+from .types.spanner_instance_admin import ListInstanceConfigOperationsResponse
from .types.spanner_instance_admin import ListInstanceConfigsRequest
from .types.spanner_instance_admin import ListInstanceConfigsResponse
+from .types.spanner_instance_admin import ListInstancePartitionOperationsRequest
+from .types.spanner_instance_admin import ListInstancePartitionOperationsResponse
+from .types.spanner_instance_admin import ListInstancePartitionsRequest
+from .types.spanner_instance_admin import ListInstancePartitionsResponse
from .types.spanner_instance_admin import ListInstancesRequest
from .types.spanner_instance_admin import ListInstancesResponse
+from .types.spanner_instance_admin import MoveInstanceMetadata
+from .types.spanner_instance_admin import MoveInstanceRequest
+from .types.spanner_instance_admin import MoveInstanceResponse
+from .types.spanner_instance_admin import ReplicaComputeCapacity
from .types.spanner_instance_admin import ReplicaInfo
+from .types.spanner_instance_admin import UpdateInstanceConfigMetadata
+from .types.spanner_instance_admin import UpdateInstanceConfigRequest
from .types.spanner_instance_admin import UpdateInstanceMetadata
+from .types.spanner_instance_admin import UpdateInstancePartitionMetadata
+from .types.spanner_instance_admin import UpdateInstancePartitionRequest
from .types.spanner_instance_admin import UpdateInstanceRequest
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.spanner_admin_instance_v1") # type: ignore
+ api_core.check_dependency_versions("google.cloud.spanner_admin_instance_v1") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.spanner_admin_instance_v1"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+ version_string: Version string in the format "x.y.z" or "x.y.zb1"
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+ + f"updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
"InstanceAdminAsyncClient",
+ "AutoscalingConfig",
+ "CreateInstanceConfigMetadata",
+ "CreateInstanceConfigRequest",
"CreateInstanceMetadata",
+ "CreateInstancePartitionMetadata",
+ "CreateInstancePartitionRequest",
"CreateInstanceRequest",
+ "DeleteInstanceConfigRequest",
+ "DeleteInstancePartitionRequest",
"DeleteInstanceRequest",
+ "FreeInstanceMetadata",
+ "FulfillmentPeriod",
"GetInstanceConfigRequest",
+ "GetInstancePartitionRequest",
"GetInstanceRequest",
"Instance",
"InstanceAdminClient",
"InstanceConfig",
+ "InstancePartition",
+ "ListInstanceConfigOperationsRequest",
+ "ListInstanceConfigOperationsResponse",
"ListInstanceConfigsRequest",
"ListInstanceConfigsResponse",
+ "ListInstancePartitionOperationsRequest",
+ "ListInstancePartitionOperationsResponse",
+ "ListInstancePartitionsRequest",
+ "ListInstancePartitionsResponse",
"ListInstancesRequest",
"ListInstancesResponse",
+ "MoveInstanceMetadata",
+ "MoveInstanceRequest",
+ "MoveInstanceResponse",
+ "OperationProgress",
+ "ReplicaComputeCapacity",
"ReplicaInfo",
+ "ReplicaSelection",
+ "UpdateInstanceConfigMetadata",
+ "UpdateInstanceConfigRequest",
"UpdateInstanceMetadata",
+ "UpdateInstancePartitionMetadata",
+ "UpdateInstancePartitionRequest",
"UpdateInstanceRequest",
)
diff --git a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
index 6fee5bcd53..60fa46718a 100644
--- a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
+++ b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json
@@ -15,11 +15,31 @@
"create_instance"
]
},
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
"DeleteInstance": {
"methods": [
"delete_instance"
]
},
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
"GetIamPolicy": {
"methods": [
"get_iam_policy"
@@ -35,16 +55,41 @@
"get_instance_config"
]
},
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
"ListInstanceConfigs": {
"methods": [
"list_instance_configs"
]
},
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
"ListInstances": {
"methods": [
"list_instances"
]
},
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
"SetIamPolicy": {
"methods": [
"set_iam_policy"
@@ -59,6 +104,16 @@
"methods": [
"update_instance"
]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
}
}
},
@@ -70,11 +125,31 @@
"create_instance"
]
},
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
"DeleteInstance": {
"methods": [
"delete_instance"
]
},
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
"GetIamPolicy": {
"methods": [
"get_iam_policy"
@@ -90,16 +165,41 @@
"get_instance_config"
]
},
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
"ListInstanceConfigs": {
"methods": [
"list_instance_configs"
]
},
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
"ListInstances": {
"methods": [
"list_instances"
]
},
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
"SetIamPolicy": {
"methods": [
"set_iam_policy"
@@ -114,6 +214,126 @@
"methods": [
"update_instance"
]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "InstanceAdminClient",
+ "rpcs": {
+ "CreateInstance": {
+ "methods": [
+ "create_instance"
+ ]
+ },
+ "CreateInstanceConfig": {
+ "methods": [
+ "create_instance_config"
+ ]
+ },
+ "CreateInstancePartition": {
+ "methods": [
+ "create_instance_partition"
+ ]
+ },
+ "DeleteInstance": {
+ "methods": [
+ "delete_instance"
+ ]
+ },
+ "DeleteInstanceConfig": {
+ "methods": [
+ "delete_instance_config"
+ ]
+ },
+ "DeleteInstancePartition": {
+ "methods": [
+ "delete_instance_partition"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "get_iam_policy"
+ ]
+ },
+ "GetInstance": {
+ "methods": [
+ "get_instance"
+ ]
+ },
+ "GetInstanceConfig": {
+ "methods": [
+ "get_instance_config"
+ ]
+ },
+ "GetInstancePartition": {
+ "methods": [
+ "get_instance_partition"
+ ]
+ },
+ "ListInstanceConfigOperations": {
+ "methods": [
+ "list_instance_config_operations"
+ ]
+ },
+ "ListInstanceConfigs": {
+ "methods": [
+ "list_instance_configs"
+ ]
+ },
+ "ListInstancePartitionOperations": {
+ "methods": [
+ "list_instance_partition_operations"
+ ]
+ },
+ "ListInstancePartitions": {
+ "methods": [
+ "list_instance_partitions"
+ ]
+ },
+ "ListInstances": {
+ "methods": [
+ "list_instances"
+ ]
+ },
+ "MoveInstance": {
+ "methods": [
+ "move_instance"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "set_iam_policy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "test_iam_permissions"
+ ]
+ },
+ "UpdateInstance": {
+ "methods": [
+ "update_instance"
+ ]
+ },
+ "UpdateInstanceConfig": {
+ "methods": [
+ "update_instance_config"
+ ]
+ },
+ "UpdateInstancePartition": {
+ "methods": [
+ "update_instance_partition"
+ ]
}
}
}
diff --git a/.github/.OwlBot.lock.yaml b/google/cloud/spanner_admin_instance_v1/gapic_version.py
similarity index 67%
rename from .github/.OwlBot.lock.yaml
rename to google/cloud/spanner_admin_instance_v1/gapic_version.py
index b8dcb4a4af..bf54fc40ae 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py
@@ -1,16 +1,16 @@
-# Copyright 2022 Google LLC
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-docker:
- image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:993a058718e84a82fda04c3177e58f0a43281a996c7c395e0a56ccc4d6d210d7
+#
+__version__ = "3.63.0" # {x-release-please-version}
diff --git a/google/cloud/spanner_admin_instance_v1/services/__init__.py b/google/cloud/spanner_admin_instance_v1/services/__init__.py
index e8e1c3845d..cbf94b283c 100644
--- a/google/cloud/spanner_admin_instance_v1/services/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
index 15f143a119..51df22ca2e 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
index 28d1098417..1e87fc5a63 100644
--- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
+++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,23 +13,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import logging as std_logging
from collections import OrderedDict
-import functools
import re
-from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
+from typing import (
+ Dict,
+ Callable,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+import uuid
+
+from google.cloud.spanner_admin_instance_v1 import gapic_version as package_version
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
-from google.api_core import retry as retries
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+import google.protobuf
+
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object] # type: ignore
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -37,19 +52,32 @@
from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import InstanceAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import InstanceAdminGrpcAsyncIOTransport
from .client import InstanceAdminClient
+try:
+ from google.api_core import client_logging # type: ignore
+
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
+except ImportError: # pragma: NO COVER
+ CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
class InstanceAdminAsyncClient:
"""Cloud Spanner Instance Admin API
+
The Cloud Spanner Instance Admin API can be used to create,
delete, modify and list instances. Instances are dedicated Cloud
Spanner serving and storage resources to be used by Cloud
Spanner databases.
+
Each instance has a "configuration", which dictates where the
serving resources for the Cloud Spanner instance are located
(e.g., US-central, Europe). Configurations are created by Google
@@ -70,8 +98,12 @@ class InstanceAdminAsyncClient:
_client: InstanceAdminClient
+ # Copy defaults from the synchronous client for use here.
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
DEFAULT_ENDPOINT = InstanceAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = InstanceAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = InstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = InstanceAdminClient._DEFAULT_UNIVERSE
instance_path = staticmethod(InstanceAdminClient.instance_path)
parse_instance_path = staticmethod(InstanceAdminClient.parse_instance_path)
@@ -79,6 +111,10 @@ class InstanceAdminAsyncClient:
parse_instance_config_path = staticmethod(
InstanceAdminClient.parse_instance_config_path
)
+ instance_partition_path = staticmethod(InstanceAdminClient.instance_partition_path)
+ parse_instance_partition_path = staticmethod(
+ InstanceAdminClient.parse_instance_partition_path
+ )
common_billing_account_path = staticmethod(
InstanceAdminClient.common_billing_account_path
)
@@ -153,7 +189,7 @@ def get_mtls_endpoint_and_cert_source(
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
- default mTLS endpoint; if the environment variabel is "never", use the default API
+ default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
@@ -182,19 +218,38 @@ def transport(self) -> InstanceAdminTransport:
"""
return self._client.transport
- get_transport_class = functools.partial(
- type(InstanceAdminClient).get_transport_class, type(InstanceAdminClient)
- )
+ @property
+ def api_endpoint(self):
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._client._api_endpoint
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used
+ by the client instance.
+ """
+ return self._client._universe_domain
+
+ get_transport_class = InstanceAdminClient.get_transport_class
def __init__(
self,
*,
- credentials: ga_credentials.Credentials = None,
- transport: Union[str, InstanceAdminTransport] = "grpc_asyncio",
- client_options: ClientOptions = None,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[str, InstanceAdminTransport, Callable[..., InstanceAdminTransport]]
+ ] = "grpc_asyncio",
+ client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the instance admin client.
+ """Instantiates the instance admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -202,26 +257,43 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.InstanceAdminTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
+ transport (Optional[Union[str,InstanceAdminTransport,Callable[..., InstanceAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the InstanceAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which have one of the following values:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
+ to provide a client certificate for mTLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
@@ -233,17 +305,43 @@ def __init__(
client_info=client_info,
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ std_logging.DEBUG
+ ): # pragma: NO COVER
+ _LOGGER.debug(
+ "Created client `google.spanner.admin.instance_v1.InstanceAdminAsyncClient`.",
+ extra={
+ "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin",
+ "universeDomain": getattr(
+ self._client._transport._credentials, "universe_domain", ""
+ ),
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
+ "credentialsInfo": getattr(
+ self.transport._credentials, "get_cred_info", lambda: None
+ )(),
+ }
+ if hasattr(self._client._transport, "_credentials")
+ else {
+ "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin",
+ "credentialsType": None,
+ },
+ )
+
async def list_instance_configs(
self,
- request: Union[spanner_instance_admin.ListInstanceConfigsRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstanceConfigsRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> pagers.ListInstanceConfigsAsyncPager:
r"""Lists the supported instance configurations for a
given project.
+ Returns both Google-managed configurations and
+ user-managed configurations.
.. code-block:: python
@@ -273,7 +371,7 @@ async def sample_list_instance_configs():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.ListInstanceConfigsRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.ListInstanceConfigsRequest, dict]]):
The request object. The request for
[ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
parent (:class:`str`):
@@ -284,32 +382,40 @@ async def sample_list_instance_configs():
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsAsyncPager:
The response for
- [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.ListInstanceConfigsRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.ListInstanceConfigsRequest):
+ request = spanner_instance_admin.ListInstanceConfigsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -318,21 +424,9 @@ async def sample_list_instance_configs():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_instance_configs,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instance_configs
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -340,6 +434,9 @@ async def sample_list_instance_configs():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -354,6 +451,8 @@ async def sample_list_instance_configs():
method=rpc,
request=request,
response=response,
+ retry=retry,
+ timeout=timeout,
metadata=metadata,
)
@@ -362,12 +461,14 @@ async def sample_list_instance_configs():
async def get_instance_config(
self,
- request: Union[spanner_instance_admin.GetInstanceConfigRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.GetInstanceConfigRequest, dict]
+ ] = None,
*,
- name: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> spanner_instance_admin.InstanceConfig:
r"""Gets information about a particular instance
configuration.
@@ -399,7 +500,7 @@ async def sample_get_instance_config():
print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.GetInstanceConfigRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.GetInstanceConfigRequest, dict]]):
The request object. The request for
[GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
name (:class:`str`):
@@ -410,11 +511,13 @@ async def sample_get_instance_config():
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
google.cloud.spanner_admin_instance_v1.types.InstanceConfig:
@@ -425,16 +528,22 @@ async def sample_get_instance_config():
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.GetInstanceConfigRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.GetInstanceConfigRequest):
+ request = spanner_instance_admin.GetInstanceConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
@@ -443,21 +552,9 @@ async def sample_get_instance_config():
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_instance_config,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_instance_config
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -465,6 +562,9 @@ async def sample_get_instance_config():
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -476,16 +576,59 @@ async def sample_get_instance_config():
# Done; return the response.
return response
- async def list_instances(
+ async def create_instance_config(
self,
- request: Union[spanner_instance_admin.ListInstancesRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.CreateInstanceConfigRequest, dict]
+ ] = None,
*,
- parent: str = None,
+ parent: Optional[str] = None,
+ instance_config: Optional[spanner_instance_admin.InstanceConfig] = None,
+ instance_config_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> pagers.ListInstancesAsyncPager:
- r"""Lists all instances in the given project.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates an instance configuration and begins preparing it to be
+ used. The returned long-running operation can be used to track
+ the progress of preparing the new instance configuration. The
+ instance configuration name is assigned by the caller. If the
+ named instance configuration already exists,
+ ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``.
+
+ Immediately after the request returns:
+
+ - The instance configuration is readable via the API, with all
+ requested attributes. The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field is set to true. Its state is ``CREATING``.
+
+ While the operation is pending:
+
+ - Cancelling the operation renders the instance configuration
+ immediately unreadable via the API.
+ - Except for deleting the creating resource, all other attempts
+ to modify the instance configuration are rejected.
+
+ Upon completion of the returned operation:
+
+ - Instances can be created using the instance configuration.
+ - The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field becomes false. Its state becomes ``READY``.
+
+ The returned long-running operation will have a name of the
+ format ``<instance_config_name>/operations/<operation_id>`` and
+ can be used to track creation of the instance configuration. The
+ metadata field type is
+ [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ The response field type is
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig],
+ if successful.
+
+ Authorization requires ``spanner.instanceConfigs.create``
+ permission on the resource
+ [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
.. code-block:: python
@@ -498,83 +641,109 @@ async def list_instances(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- async def sample_list_instances():
+ async def sample_create_instance_config():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- request = spanner_admin_instance_v1.ListInstancesRequest(
+ request = spanner_admin_instance_v1.CreateInstanceConfigRequest(
parent="parent_value",
+ instance_config_id="instance_config_id_value",
)
# Make the request
- page_result = client.list_instances(request=request)
+ operation = client.create_instance_config(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
# Handle the response
- async for response in page_result:
- print(response)
+ print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.ListInstancesRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.CreateInstanceConfigRequest, dict]]):
The request object. The request for
- [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+ [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
parent (:class:`str`):
- Required. The name of the project for which a list of
- instances is requested. Values are of the form
+ Required. The name of the project in which to create the
+ instance configuration. Values are of the form
+ ``projects/<project>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ instance_config (:class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig`):
+ Required. The ``InstanceConfig`` proto of the
+ configuration to create. ``instance_config.name`` must
+ be ``<parent>/instanceConfigs/<instance_config_id>``.
+ ``instance_config.base_config`` must be a Google-managed
+ configuration name, e.g. /instanceConfigs/us-east1,
+ /instanceConfigs/nam3.
+
+ This corresponds to the ``instance_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_config_id (:class:`str`):
+ Required. The ID of the instance configuration to
+ create. Valid identifiers are of the form
+ ``custom-[-a-z0-9]*[a-z0-9]`` and must be between 2 and
+ 64 characters in length. The ``custom-`` prefix is
+ required to avoid name conflicts with Google-managed
+ configurations.
+
+ This corresponds to the ``instance_config_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesAsyncPager:
- The response for
- [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
- Iterating over this object will yield results and
- resolve additional pages automatically.
+ The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig` A possible configuration for a Cloud Spanner instance. Configurations
+ define the geographic placement of nodes and their
+ replication.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, instance_config, instance_config_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.ListInstancesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.CreateInstanceConfigRequest):
+ request = spanner_instance_admin.CreateInstanceConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
+ if instance_config is not None:
+ request.instance_config = instance_config
+ if instance_config_id is not None:
+ request.instance_config_id = instance_config_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_instances,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_instance_config
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -582,6 +751,9 @@ async def sample_list_instances():
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -590,28 +762,75 @@ async def sample_list_instances():
metadata=metadata,
)
- # This method is paged; wrap the response in a pager, which provides
- # an `__aiter__` convenience method.
- response = pagers.ListInstancesAsyncPager(
- method=rpc,
- request=request,
- response=response,
- metadata=metadata,
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_instance_admin.InstanceConfig,
+ metadata_type=spanner_instance_admin.CreateInstanceConfigMetadata,
)
# Done; return the response.
return response
- async def get_instance(
+ async def update_instance_config(
self,
- request: Union[spanner_instance_admin.GetInstanceRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.UpdateInstanceConfigRequest, dict]
+ ] = None,
*,
- name: str = None,
+ instance_config: Optional[spanner_instance_admin.InstanceConfig] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> spanner_instance_admin.Instance:
- r"""Gets information about a particular instance.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates an instance configuration. The returned long-running
+ operation can be used to track the progress of updating the
+ instance. If the named instance configuration does not exist,
+ returns ``NOT_FOUND``.
+
+ Only user-managed configurations can be updated.
+
+ Immediately after the request returns:
+
+ - The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field is set to true.
+
+ While the operation is pending:
+
+ - Cancelling the operation sets its metadata's
+ [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
+ The operation is guaranteed to succeed at undoing all changes,
+ after which point it terminates with a ``CANCELLED`` status.
+ - All other attempts to modify the instance configuration are
+ rejected.
+ - Reading the instance configuration via the API continues to
+ give the pre-request values.
+
+ Upon completion of the returned operation:
+
+ - Creating instances using the instance configuration uses the
+ new values.
+ - The new values of the instance configuration are readable via
+ the API.
+ - The instance configuration's
+ [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ field becomes false.
+
+ The returned long-running operation will have a name of the
+ format ``<instance_config_name>/operations/<operation_id>`` and
+ can be used to track the instance configuration modification.
+ The metadata field type is
+ [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
+ The response field type is
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig],
+ if successful.
+
+ Authorization requires ``spanner.instanceConfigs.update``
+ permission on the resource
+ [name][google.spanner.admin.instance.v1.InstanceConfig.name].
.. code-block:: python
@@ -624,86 +843,113 @@ async def get_instance(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- async def sample_get_instance():
+ async def sample_update_instance_config():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- request = spanner_admin_instance_v1.GetInstanceRequest(
- name="name_value",
+ request = spanner_admin_instance_v1.UpdateInstanceConfigRequest(
)
# Make the request
- response = await client.get_instance(request=request)
+ operation = client.update_instance_config(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
# Handle the response
print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.GetInstanceRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstanceConfigRequest, dict]]):
The request object. The request for
- [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
- name (:class:`str`):
- Required. The name of the requested instance. Values are
- of the form ``projects//instances/``.
+ [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
+ instance_config (:class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig`):
+ Required. The user instance configuration to update,
+ which must always include the instance configuration
+ name. Otherwise, only fields mentioned in
+ [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
+ need be included. To prevent conflicts of concurrent
+ updates,
+ [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ can be used.
+
+ This corresponds to the ``instance_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which fields in
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
+ should be updated. The field mask must always be
+ specified; this prevents any future fields in
+ [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
+ from being erased accidentally by clients that do not
+ know about them. Only display_name and labels can be
+ updated.
- This corresponds to the ``name`` field
+ This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.cloud.spanner_admin_instance_v1.types.Instance:
- An isolated set of Cloud Spanner
- resources on which databases can be
- hosted.
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig` A possible configuration for a Cloud Spanner instance. Configurations
+ define the geographic placement of nodes and their
+ replication.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [instance_config, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.GetInstanceRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.UpdateInstanceConfigRequest):
+ request = spanner_instance_admin.UpdateInstanceConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
- if name is not None:
- request.name = name
+ if instance_config is not None:
+ request.instance_config = instance_config
+ if update_mask is not None:
+ request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_instance,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_instance_config
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("instance_config.name", request.instance_config.name),)
+ ),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -712,58 +958,37 @@ async def sample_get_instance():
metadata=metadata,
)
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_instance_admin.InstanceConfig,
+ metadata_type=spanner_instance_admin.UpdateInstanceConfigMetadata,
+ )
+
# Done; return the response.
return response
- async def create_instance(
+ async def delete_instance_config(
self,
- request: Union[spanner_instance_admin.CreateInstanceRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.DeleteInstanceConfigRequest, dict]
+ ] = None,
*,
- parent: str = None,
- instance_id: str = None,
- instance: spanner_instance_admin.Instance = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operation_async.AsyncOperation:
- r"""Creates an instance and begins preparing it to begin serving.
- The returned [long-running
- operation][google.longrunning.Operation] can be used to track
- the progress of preparing the new instance. The instance name is
- assigned by the caller. If the named instance already exists,
- ``CreateInstance`` returns ``ALREADY_EXISTS``.
-
- Immediately upon completion of this request:
-
- - The instance is readable via the API, with all requested
- attributes but no allocated resources. Its state is
- ``CREATING``.
-
- Until completion of the returned operation:
-
- - Cancelling the operation renders the instance immediately
- unreadable via the API.
- - The instance can be deleted.
- - All other attempts to modify the instance are rejected.
-
- Upon completion of the returned operation:
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes the instance configuration. Deletion is only allowed
+ when no instances are using the configuration. If any instances
+ are using the configuration, returns ``FAILED_PRECONDITION``.
- - Billing for all successfully-allocated resources begins (some
- types may have lower than the requested levels).
- - Databases can be created in the instance.
- - The instance's allocated resource levels are readable via the
- API.
- - The instance's state becomes ``READY``.
+ Only user-managed configurations can be deleted.
- The returned [long-running
- operation][google.longrunning.Operation] will have a name of the
- format ``/operations/`` and can be
- used to track creation of the instance. The
- [metadata][google.longrunning.Operation.metadata] field type is
- [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
- The [response][google.longrunning.Operation.response] field type
- is [Instance][google.spanner.admin.instance.v1.Instance], if
- successful.
+ Authorization requires ``spanner.instanceConfigs.delete``
+ permission on the resource
+ [name][google.spanner.admin.instance.v1.InstanceConfig.name].
.. code-block:: python
@@ -776,184 +1001,106 @@ async def create_instance(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- async def sample_create_instance():
+ async def sample_delete_instance_config():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- instance = spanner_admin_instance_v1.Instance()
- instance.name = "name_value"
- instance.config = "config_value"
- instance.display_name = "display_name_value"
-
- request = spanner_admin_instance_v1.CreateInstanceRequest(
- parent="parent_value",
- instance_id="instance_id_value",
- instance=instance,
+ request = spanner_admin_instance_v1.DeleteInstanceConfigRequest(
+ name="name_value",
)
# Make the request
- operation = client.create_instance(request=request)
-
- print("Waiting for operation to complete...")
-
- response = await operation.result()
-
- # Handle the response
- print(response)
+ await client.delete_instance_config(request=request)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.CreateInstanceRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstanceConfigRequest, dict]]):
The request object. The request for
- [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
- parent (:class:`str`):
- Required. The name of the project in which to create the
- instance. Values are of the form ``projects/``.
-
- This corresponds to the ``parent`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- instance_id (:class:`str`):
- Required. The ID of the instance to create. Valid
- identifiers are of the form ``[a-z][-a-z0-9]*[a-z0-9]``
- and must be between 2 and 64 characters in length.
-
- This corresponds to the ``instance_id`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- instance (:class:`google.cloud.spanner_admin_instance_v1.types.Instance`):
- Required. The instance to create. The name may be
- omitted, but if specified must be
- ``/instances/``.
+ [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig].
+ name (:class:`str`):
+ Required. The name of the instance configuration to be
+ deleted. Values are of the form
+ ``projects//instanceConfigs/``
- This corresponds to the ``instance`` field
+ This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.api_core.operation_async.AsyncOperation:
- An object representing a long-running operation.
-
- The result type for the operation will be
- :class:`google.cloud.spanner_admin_instance_v1.types.Instance`
- An isolated set of Cloud Spanner resources on which
- databases can be hosted.
-
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, instance_id, instance])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.CreateInstanceRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.DeleteInstanceConfigRequest):
+ request = spanner_instance_admin.DeleteInstanceConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
- if parent is not None:
- request.parent = parent
- if instance_id is not None:
- request.instance_id = instance_id
- if instance is not None:
- request.instance = instance
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.create_instance,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_instance_config
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- response = await rpc(
+ await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
- # Wrap the response in an operation future.
- response = operation_async.from_gapic(
- response,
- self._client._transport.operations_client,
- spanner_instance_admin.Instance,
- metadata_type=spanner_instance_admin.CreateInstanceMetadata,
- )
-
- # Done; return the response.
- return response
-
- async def update_instance(
+ async def list_instance_config_operations(
self,
- request: Union[spanner_instance_admin.UpdateInstanceRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstanceConfigOperationsRequest, dict]
+ ] = None,
*,
- instance: spanner_instance_admin.Instance = None,
- field_mask: field_mask_pb2.FieldMask = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operation_async.AsyncOperation:
- r"""Updates an instance, and begins allocating or releasing
- resources as requested. The returned [long-running
- operation][google.longrunning.Operation] can be used to track
- the progress of updating the instance. If the named instance
- does not exist, returns ``NOT_FOUND``.
-
- Immediately upon completion of this request:
-
- - For resource types for which a decrease in the instance's
- allocation has been requested, billing is based on the
- newly-requested level.
-
- Until completion of the returned operation:
-
- - Cancelling the operation sets its metadata's
- [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
- and begins restoring resources to their pre-request values.
- The operation is guaranteed to succeed at undoing all
- resource changes, after which point it terminates with a
- ``CANCELLED`` status.
- - All other attempts to modify the instance are rejected.
- - Reading the instance via the API continues to give the
- pre-request resource levels.
-
- Upon completion of the returned operation:
-
- - Billing begins for all successfully-allocated resources (some
- types may have lower than the requested levels).
- - All newly-reserved resources are available for serving the
- instance's tables.
- - The instance's new resource levels are readable via the API.
-
- The returned [long-running
- operation][google.longrunning.Operation] will have a name of the
- format ``/operations/`` and can be
- used to track the instance modification. The
- [metadata][google.longrunning.Operation.metadata] field type is
- [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
- The [response][google.longrunning.Operation.response] field type
- is [Instance][google.spanner.admin.instance.v1.Instance], if
- successful.
-
- Authorization requires ``spanner.instances.update`` permission
- on the resource
- [name][google.spanner.admin.instance.v1.Instance.name].
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListInstanceConfigOperationsAsyncPager:
+ r"""Lists the user-managed instance configuration long-running
+ operations in the given project. An instance configuration
+ operation has a name of the form
+ ``projects//instanceConfigs//operations/``.
+ The long-running operation metadata field type
+ ``metadata.type_url`` describes the type of the metadata.
+ Operations returned include those that have
+ completed/failed/canceled within the last 7 days, and pending
+ operations. Operations returned are ordered by
+ ``operation.metadata.value.start_time`` in descending order
+ starting from the most recently started operation.
.. code-block:: python
@@ -966,107 +1113,93 @@ async def update_instance(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- async def sample_update_instance():
+ async def sample_list_instance_config_operations():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- instance = spanner_admin_instance_v1.Instance()
- instance.name = "name_value"
- instance.config = "config_value"
- instance.display_name = "display_name_value"
-
- request = spanner_admin_instance_v1.UpdateInstanceRequest(
- instance=instance,
+ request = spanner_admin_instance_v1.ListInstanceConfigOperationsRequest(
+ parent="parent_value",
)
# Make the request
- operation = client.update_instance(request=request)
-
- print("Waiting for operation to complete...")
-
- response = await operation.result()
+ page_result = client.list_instance_config_operations(request=request)
# Handle the response
- print(response)
+ async for response in page_result:
+ print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstanceRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.ListInstanceConfigOperationsRequest, dict]]):
The request object. The request for
- [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
- instance (:class:`google.cloud.spanner_admin_instance_v1.types.Instance`):
- Required. The instance to update, which must always
- include the instance name. Otherwise, only fields
- mentioned in
- [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
- need be included.
-
- This corresponds to the ``instance`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- field_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
- Required. A mask specifying which fields in
- [Instance][google.spanner.admin.instance.v1.Instance]
- should be updated. The field mask must always be
- specified; this prevents any future fields in
- [Instance][google.spanner.admin.instance.v1.Instance]
- from being erased accidentally by clients that do not
- know about them.
+ [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
+ parent (:class:`str`):
+ Required. The project of the instance configuration
+ operations. Values are of the form
+ ``projects/``.
- This corresponds to the ``field_mask`` field
+ This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.api_core.operation_async.AsyncOperation:
- An object representing a long-running operation.
+ google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigOperationsAsyncPager:
+ The response for
+ [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
- The result type for the operation will be
- :class:`google.cloud.spanner_admin_instance_v1.types.Instance`
- An isolated set of Cloud Spanner resources on which
- databases can be hosted.
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([instance, field_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.UpdateInstanceRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_instance_admin.ListInstanceConfigOperationsRequest
+ ):
+ request = spanner_instance_admin.ListInstanceConfigOperationsRequest(
+ request
+ )
# If we have keyword arguments corresponding to fields on the
# request, apply these.
- if instance is not None:
- request.instance = instance
- if field_mask is not None:
- request.field_mask = field_mask
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.update_instance,
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instance_config_operations
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(
- (("instance.name", request.instance.name),)
- ),
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1075,37 +1208,32 @@ async def sample_update_instance():
metadata=metadata,
)
- # Wrap the response in an operation future.
- response = operation_async.from_gapic(
- response,
- self._client._transport.operations_client,
- spanner_instance_admin.Instance,
- metadata_type=spanner_instance_admin.UpdateInstanceMetadata,
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListInstanceConfigOperationsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
)
# Done; return the response.
return response
- async def delete_instance(
+ async def list_instances(
self,
- request: Union[spanner_instance_admin.DeleteInstanceRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstancesRequest, dict]
+ ] = None,
*,
- name: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> None:
- r"""Deletes an instance.
-
- Immediately upon completion of the request:
-
- - Billing ceases for all of the instance's reserved resources.
-
- Soon afterward:
-
- - The instance and *all of its databases* immediately and
- irrevocably disappear from the API. All data in the databases
- is permanently deleted.
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListInstancesAsyncPager:
+ r"""Lists all instances in the given project.
.. code-block:: python
@@ -1118,99 +1246,123 @@ async def delete_instance(
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- async def sample_delete_instance():
+ async def sample_list_instances():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- request = spanner_admin_instance_v1.DeleteInstanceRequest(
- name="name_value",
+ request = spanner_admin_instance_v1.ListInstancesRequest(
+ parent="parent_value",
)
# Make the request
- await client.delete_instance(request=request)
+ page_result = client.list_instances(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
Args:
- request (Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstanceRequest, dict]):
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.ListInstancesRequest, dict]]):
The request object. The request for
- [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
- name (:class:`str`):
- Required. The name of the instance to be deleted. Values
- are of the form
- ``projects//instances/``
+ [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+ parent (:class:`str`):
+ Required. The name of the project for which a list of
+ instances is requested. Values are of the form
+ ``projects/``.
- This corresponds to the ``name`` field
+ This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesAsyncPager:
+ The response for
+ [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = spanner_instance_admin.DeleteInstanceRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.ListInstancesRequest):
+ request = spanner_instance_admin.ListInstancesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
- if name is not None:
- request.name = name
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.delete_instance,
- default_retry=retries.Retry(
- initial=1.0,
- maximum=32.0,
- multiplier=1.3,
- predicate=retries.if_exception_type(
- core_exceptions.DeadlineExceeded,
- core_exceptions.ServiceUnavailable,
- ),
- deadline=3600.0,
- ),
- default_timeout=3600.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instances
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
- await rpc(
+ response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
- async def set_iam_policy(
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListInstancesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_instance_partitions(
self,
- request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstancePartitionsRequest, dict]
+ ] = None,
*,
- resource: str = None,
+ parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy_pb2.Policy:
- r"""Sets the access control policy on an instance resource. Replaces
- any existing policy.
-
- Authorization requires ``spanner.instances.setIamPolicy`` on
- [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListInstancePartitionsAsyncPager:
+ r"""Lists all instance partitions for the given instance.
.. code-block:: python
@@ -1222,140 +1374,95 @@ async def set_iam_policy(
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- from google.iam.v1 import iam_policy_pb2 # type: ignore
- async def sample_set_iam_policy():
+ async def sample_list_instance_partitions():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- request = iam_policy_pb2.SetIamPolicyRequest(
- resource="resource_value",
+ request = spanner_admin_instance_v1.ListInstancePartitionsRequest(
+ parent="parent_value",
)
# Make the request
- response = await client.set_iam_policy(request=request)
+ page_result = client.list_instance_partitions(request=request)
# Handle the response
- print(response)
+ async for response in page_result:
+ print(response)
Args:
- request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
- The request object. Request message for `SetIamPolicy`
- method.
- resource (:class:`str`):
- REQUIRED: The resource for which the
- policy is being specified. See the
- operation documentation for the
- appropriate value for this field.
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.ListInstancePartitionsRequest, dict]]):
+ The request object. The request for
+ [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
+ parent (:class:`str`):
+ Required. The instance whose instance partitions should
+ be listed. Values are of the form
+ ``projects//instances/``. Use
+ ``{instance} = '-'`` to list instance partitions for all
+ Instances in a project, e.g.,
+ ``projects/myproject/instances/-``.
- This corresponds to the ``resource`` field
+ This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.iam.v1.policy_pb2.Policy:
- An Identity and Access Management (IAM) policy, which specifies access
- controls for Google Cloud resources.
-
- A Policy is a collection of bindings. A binding binds
- one or more members, or principals, to a single role.
- Principals can be user accounts, service accounts,
- Google groups, and domains (such as G Suite). A role
- is a named list of permissions; each role can be an
- IAM predefined role or a user-created custom role.
-
- For some types of Google Cloud resources, a binding
- can also specify a condition, which is a logical
- expression that allows access to a resource only if
- the expression evaluates to true. A condition can add
- constraints based on attributes of the request, the
- resource, or both. To learn which resources support
- conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
-
- **JSON example:**
-
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
-
- **YAML example:**
-
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionsAsyncPager:
+ The response for
+ [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
- For a description of IAM and its features, see the
- [IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([resource])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- # The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- if isinstance(request, dict):
- request = iam_policy_pb2.SetIamPolicyRequest(**request)
- elif not request:
- request = iam_policy_pb2.SetIamPolicyRequest(
- resource=resource,
- )
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_instance_admin.ListInstancePartitionsRequest
+ ):
+ request = spanner_instance_admin.ListInstancePartitionsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.set_iam_policy,
- default_timeout=30.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_instance_partitions
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
# Send the request.
response = await rpc(
request,
@@ -1364,24 +1471,32 @@ async def sample_set_iam_policy():
metadata=metadata,
)
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListInstancePartitionsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
# Done; return the response.
return response
- async def get_iam_policy(
+ async def get_instance(
self,
- request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
+ request: Optional[
+ Union[spanner_instance_admin.GetInstanceRequest, dict]
+ ] = None,
*,
- resource: str = None,
+ name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> policy_pb2.Policy:
- r"""Gets the access control policy for an instance resource. Returns
- an empty policy if an instance exists but does not have a policy
- set.
-
- Authorization requires ``spanner.instances.getIamPolicy`` on
- [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_instance_admin.Instance:
+ r"""Gets information about a particular instance.
.. code-block:: python
@@ -1393,150 +1508,1760 @@ async def get_iam_policy(
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import spanner_admin_instance_v1
- from google.iam.v1 import iam_policy_pb2 # type: ignore
- async def sample_get_iam_policy():
+ async def sample_get_instance():
# Create a client
client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
# Initialize request argument(s)
- request = iam_policy_pb2.GetIamPolicyRequest(
- resource="resource_value",
+ request = spanner_admin_instance_v1.GetInstanceRequest(
+ name="name_value",
)
# Make the request
- response = await client.get_iam_policy(request=request)
+ response = await client.get_instance(request=request)
# Handle the response
print(response)
Args:
- request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
- The request object. Request message for `GetIamPolicy`
- method.
- resource (:class:`str`):
- REQUIRED: The resource for which the
- policy is being requested. See the
- operation documentation for the
- appropriate value for this field.
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.GetInstanceRequest, dict]]):
+ The request object. The request for
+ [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+ name (:class:`str`):
+ Required. The name of the requested instance. Values are
+ of the form ``projects//instances/``.
- This corresponds to the ``resource`` field
+ This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
Returns:
- google.iam.v1.policy_pb2.Policy:
+ google.cloud.spanner_admin_instance_v1.types.Instance:
+ An isolated set of Cloud Spanner
+ resources on which databases can be
+ hosted.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.GetInstanceRequest):
+ request = spanner_instance_admin.GetInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def create_instance(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.CreateInstanceRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ instance_id: Optional[str] = None,
+ instance: Optional[spanner_instance_admin.Instance] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates an instance and begins preparing it to begin serving.
+ The returned long-running operation can be used to track the
+ progress of preparing the new instance. The instance name is
+ assigned by the caller. If the named instance already exists,
+ ``CreateInstance`` returns ``ALREADY_EXISTS``.
+
+ Immediately upon completion of this request:
+
+ - The instance is readable via the API, with all requested
+ attributes but no allocated resources. Its state is
+ ``CREATING``.
+
+ Until completion of the returned operation:
+
+ - Cancelling the operation renders the instance immediately
+ unreadable via the API.
+ - The instance can be deleted.
+ - All other attempts to modify the instance are rejected.
+
+ Upon completion of the returned operation:
+
+ - Billing for all successfully-allocated resources begins (some
+ types may have lower than the requested levels).
+ - Databases can be created in the instance.
+ - The instance's allocated resource levels are readable via the
+ API.
+ - The instance's state becomes ``READY``.
+
+ The returned long-running operation will have a name of the
+ format ``/operations/`` and can be
+ used to track creation of the instance. The metadata field type
+ is
+ [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+ The response field type is
+ [Instance][google.spanner.admin.instance.v1.Instance], if
+ successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_create_instance():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = spanner_admin_instance_v1.Instance()
+ instance.name = "name_value"
+ instance.config = "config_value"
+ instance.display_name = "display_name_value"
+
+ request = spanner_admin_instance_v1.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.CreateInstanceRequest, dict]]):
+ The request object. The request for
+ [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+ parent (:class:`str`):
+ Required. The name of the project in which to create the
+ instance. Values are of the form ``projects/``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_id (:class:`str`):
+ Required. The ID of the instance to create. Valid
+ identifiers are of the form ``[a-z][-a-z0-9]*[a-z0-9]``
+ and must be between 2 and 64 characters in length.
+
+ This corresponds to the ``instance_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance (:class:`google.cloud.spanner_admin_instance_v1.types.Instance`):
+ Required. The instance to create. The name may be
+ omitted, but if specified must be
+ ``/instances/``.
+
+ This corresponds to the ``instance`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_instance_v1.types.Instance`
+ An isolated set of Cloud Spanner resources on which
+ databases can be hosted.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, instance_id, instance]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.CreateInstanceRequest):
+ request = spanner_instance_admin.CreateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if instance_id is not None:
+ request.instance_id = instance_id
+ if instance is not None:
+ request.instance = instance
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_instance_admin.Instance,
+ metadata_type=spanner_instance_admin.CreateInstanceMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_instance(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.UpdateInstanceRequest, dict]
+ ] = None,
+ *,
+ instance: Optional[spanner_instance_admin.Instance] = None,
+ field_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates an instance, and begins allocating or releasing
+ resources as requested. The returned long-running operation can
+ be used to track the progress of updating the instance. If the
+ named instance does not exist, returns ``NOT_FOUND``.
+
+ Immediately upon completion of this request:
+
+ - For resource types for which a decrease in the instance's
+ allocation has been requested, billing is based on the
+ newly-requested level.
+
+ Until completion of the returned operation:
+
+ - Cancelling the operation sets its metadata's
+ [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
+ and begins restoring resources to their pre-request values.
+ The operation is guaranteed to succeed at undoing all resource
+ changes, after which point it terminates with a ``CANCELLED``
+ status.
+ - All other attempts to modify the instance are rejected.
+ - Reading the instance via the API continues to give the
+ pre-request resource levels.
+
+ Upon completion of the returned operation:
+
+ - Billing begins for all successfully-allocated resources (some
+ types may have lower than the requested levels).
+ - All newly-reserved resources are available for serving the
+ instance's tables.
+ - The instance's new resource levels are readable via the API.
+
+ The returned long-running operation will have a name of the
+ format ``/operations/`` and can be
+ used to track the instance modification. The metadata field type
+ is
+ [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+ The response field type is
+ [Instance][google.spanner.admin.instance.v1.Instance], if
+ successful.
+
+ Authorization requires ``spanner.instances.update`` permission
+ on the resource
+ [name][google.spanner.admin.instance.v1.Instance.name].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_update_instance():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = spanner_admin_instance_v1.Instance()
+ instance.name = "name_value"
+ instance.config = "config_value"
+ instance.display_name = "display_name_value"
+
+ request = spanner_admin_instance_v1.UpdateInstanceRequest(
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstanceRequest, dict]]):
+ The request object. The request for
+ [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+ instance (:class:`google.cloud.spanner_admin_instance_v1.types.Instance`):
+ Required. The instance to update, which must always
+ include the instance name. Otherwise, only fields
+ mentioned in
+ [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
+ need be included.
+
+ This corresponds to the ``instance`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ field_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which fields in
+ [Instance][google.spanner.admin.instance.v1.Instance]
+ should be updated. The field mask must always be
+ specified; this prevents any future fields in
+ [Instance][google.spanner.admin.instance.v1.Instance]
+ from being erased accidentally by clients that do not
+ know about them.
+
+ This corresponds to the ``field_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.spanner_admin_instance_v1.types.Instance`
+ An isolated set of Cloud Spanner resources on which
+ databases can be hosted.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [instance, field_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.UpdateInstanceRequest):
+ request = spanner_instance_admin.UpdateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if instance is not None:
+ request.instance = instance
+ if field_mask is not None:
+ request.field_mask = field_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("instance.name", request.instance.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_instance_admin.Instance,
+ metadata_type=spanner_instance_admin.UpdateInstanceMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_instance(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.DeleteInstanceRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes an instance.
+
+ Immediately upon completion of the request:
+
+ - Billing ceases for all of the instance's reserved resources.
+
+ Soon afterward:
+
+ - The instance and *all of its databases* immediately and
+ irrevocably disappear from the API. All data in the databases
+ is permanently deleted.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_delete_instance():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_instance(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstanceRequest, dict]]):
+ The request object. The request for
+ [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+ name (:class:`str`):
+ Required. The name of the instance to be deleted. Values
+ are of the form
+ ``projects//instances/``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.DeleteInstanceRequest):
+ request = spanner_instance_admin.DeleteInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_instance
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def set_iam_policy(
+ self,
+ request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Sets the access control policy on an instance resource. Replaces
+ any existing policy.
+
+ Authorization requires ``spanner.instances.setIamPolicy`` on
+ [resource][google.iam.v1.SetIamPolicyRequest.resource].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+ The request object. Request message for ``SetIamPolicy`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy is being specified. See the
+ operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
+
+ A Policy is a collection of bindings. A binding binds
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
+
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+                ``{ "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }``
+
+ **YAML example:**
+
+                ``bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3``
+
+ For a description of IAM and its features, see the
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.set_iam_policy
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_iam_policy(
+ self,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Gets the access control policy for an instance resource. Returns
+ an empty policy if an instance exists but does not have a policy
+ set.
+
+ Authorization requires ``spanner.instances.getIamPolicy`` on
+ [resource][google.iam.v1.GetIamPolicyRequest.resource].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
+ The request object. Request message for ``GetIamPolicy`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy is being requested. See the
+ operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.policy_pb2.Policy:
An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources.
- A Policy is a collection of bindings. A binding binds
- one or more members, or principals, to a single role.
- Principals can be user accounts, service accounts,
- Google groups, and domains (such as G Suite). A role
- is a named list of permissions; each role can be an
- IAM predefined role or a user-created custom role.
+ A Policy is a collection of bindings. A binding binds
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
+
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+                ``{ "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }``
+
+ **YAML example:**
+
+                ``bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3``
+
+ For a description of IAM and its features, see the
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_iam_policy
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def test_iam_permissions(
+ self,
+ request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ permissions: Optional[MutableSequence[str]] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> iam_policy_pb2.TestIamPermissionsResponse:
+ r"""Returns permissions that the caller has on the specified
+ instance resource.
+
+ Attempting this RPC on a non-existent Cloud Spanner instance
+ resource will result in a NOT_FOUND error if the user has
+ ``spanner.instances.list`` permission on the containing Google
+ Cloud Project. Otherwise returns an empty set of permissions.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+ The request object. Request message for ``TestIamPermissions`` method.
+ resource (:class:`str`):
+ REQUIRED: The resource for which the
+ policy detail is being requested. See
+ the operation documentation for the
+ appropriate value for this field.
+
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ permissions (:class:`MutableSequence[str]`):
+ The set of permissions to check for the ``resource``.
+ Permissions with wildcards (such as '*' or 'storage.*')
+ are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+ This corresponds to the ``permissions`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+ Response message for TestIamPermissions method.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource, permissions]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+ elif not request:
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource=resource, permissions=permissions
+ )
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.test_iam_permissions
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_instance_partition(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.GetInstancePartitionRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> spanner_instance_admin.InstancePartition:
+ r"""Gets information about a particular instance
+ partition.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_get_instance_partition():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.GetInstancePartitionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_instance_partition(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.GetInstancePartitionRequest, dict]]):
+ The request object. The request for
+ [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
+ name (:class:`str`):
+ Required. The name of the requested instance partition.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/instancePartitions/{instance_partition}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.spanner_admin_instance_v1.types.InstancePartition:
+ An isolated set of Cloud Spanner
+ resources that databases can define
+ placements on.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_instance_admin.GetInstancePartitionRequest):
+ request = spanner_instance_admin.GetInstancePartitionRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_instance_partition
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def create_instance_partition(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.CreateInstancePartitionRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ instance_partition: Optional[spanner_instance_admin.InstancePartition] = None,
+ instance_partition_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates an instance partition and begins preparing it to be
+ used. The returned long-running operation can be used to track
+ the progress of preparing the new instance partition. The
+ instance partition name is assigned by the caller. If the named
+ instance partition already exists, ``CreateInstancePartition``
+ returns ``ALREADY_EXISTS``.
+
+ Immediately upon completion of this request:
+
+ - The instance partition is readable via the API, with all
+ requested attributes but no allocated resources. Its state is
+ ``CREATING``.
+
+ Until completion of the returned operation:
+
+ - Cancelling the operation renders the instance partition
+ immediately unreadable via the API.
+ - The instance partition can be deleted.
+ - All other attempts to modify the instance partition are
+ rejected.
+
+ Upon completion of the returned operation:
+
+ - Billing for all successfully-allocated resources begins (some
+ types may have lower than the requested levels).
+ - Databases can start using this instance partition.
+ - The instance partition's allocated resource levels are
+ readable via the API.
+ - The instance partition's state becomes ``READY``.
+
+ The returned long-running operation will have a name of the
+        format ``<instance_partition_name>/operations/<operation_id>``
+ and can be used to track creation of the instance partition. The
+ metadata field type is
+ [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
+ The response field type is
+ [InstancePartition][google.spanner.admin.instance.v1.InstancePartition],
+ if successful.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_create_instance_partition():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance_partition = spanner_admin_instance_v1.InstancePartition()
+ instance_partition.node_count = 1070
+ instance_partition.name = "name_value"
+ instance_partition.config = "config_value"
+ instance_partition.display_name = "display_name_value"
+
+ request = spanner_admin_instance_v1.CreateInstancePartitionRequest(
+ parent="parent_value",
+ instance_partition_id="instance_partition_id_value",
+ instance_partition=instance_partition,
+ )
+
+ # Make the request
+ operation = client.create_instance_partition(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.CreateInstancePartitionRequest, dict]]):
+ The request object. The request for
+ [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
+ parent (:class:`str`):
+ Required. The name of the instance in which to create
+ the instance partition. Values are of the form
+                ``projects/<project>/instances/<instance>``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_partition (:class:`google.cloud.spanner_admin_instance_v1.types.InstancePartition`):
+ Required. The instance partition to create. The
+ instance_partition.name may be omitted, but if specified
+ must be
+                ``<instance_name>/instancePartitions/<instance_partition_id>``.
+
+ This corresponds to the ``instance_partition`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_partition_id (:class:`str`):
+ Required. The ID of the instance partition to create.
+ Valid identifiers are of the form
+ ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 2 and 64
+ characters in length.
+
+ This corresponds to the ``instance_partition_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.InstancePartition` An isolated set of Cloud Spanner resources that databases can define
+ placements on.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, instance_partition, instance_partition_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_instance_admin.CreateInstancePartitionRequest
+ ):
+ request = spanner_instance_admin.CreateInstancePartitionRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if instance_partition is not None:
+ request.instance_partition = instance_partition
+ if instance_partition_id is not None:
+ request.instance_partition_id = instance_partition_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_instance_partition
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ spanner_instance_admin.InstancePartition,
+ metadata_type=spanner_instance_admin.CreateInstancePartitionMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_instance_partition(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.DeleteInstancePartitionRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes an existing instance partition. Requires that the
+ instance partition is not used by any database or backup and is
+ not the default instance partition of an instance.
+
+ Authorization requires ``spanner.instancePartitions.delete``
+ permission on the resource
+ [name][google.spanner.admin.instance.v1.InstancePartition.name].
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import spanner_admin_instance_v1
+
+ async def sample_delete_instance_partition():
+ # Create a client
+ client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = spanner_admin_instance_v1.DeleteInstancePartitionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_instance_partition(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstancePartitionRequest, dict]]):
+ The request object. The request for
+ [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
+ name (:class:`str`):
+ Required. The name of the instance partition to be
+ deleted. Values are of the form
+ ``projects/{project}/instances/{instance}/instancePartitions/{instance_partition}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, spanner_instance_admin.DeleteInstancePartitionRequest
+ ):
+ request = spanner_instance_admin.DeleteInstancePartitionRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_instance_partition
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+    async def update_instance_partition(
+        self,
+        request: Optional[
+            Union[spanner_instance_admin.UpdateInstancePartitionRequest, dict]
+        ] = None,
+        *,
+        instance_partition: Optional[spanner_instance_admin.InstancePartition] = None,
+        field_mask: Optional[field_mask_pb2.FieldMask] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Updates an instance partition, and begins allocating or
+        releasing resources as requested. The returned long-running
+        operation can be used to track the progress of updating the
+        instance partition. If the named instance partition does not
+        exist, returns ``NOT_FOUND``.
+
+        Immediately upon completion of this request:
+
+        -  For resource types for which a decrease in the instance
+           partition's allocation has been requested, billing is based on
+           the newly-requested level.
+
+        Until completion of the returned operation:
+
+        -  Cancelling the operation sets its metadata's
+           [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
+           and begins restoring resources to their pre-request values.
+           The operation is guaranteed to succeed at undoing all resource
+           changes, after which point it terminates with a ``CANCELLED``
+           status.
+        -  All other attempts to modify the instance partition are
+           rejected.
+        -  Reading the instance partition via the API continues to give
+           the pre-request resource levels.
+
+        Upon completion of the returned operation:
+
+        -  Billing begins for all successfully-allocated resources (some
+           types may have lower than the requested levels).
+        -  All newly-reserved resources are available for serving the
+           instance partition's tables.
+        -  The instance partition's new resource levels are readable via
+           the API.
+
+        The returned long-running operation will have a name of the
+        format ``<instance_partition_name>/operations/<operation_id>``
+        and can be used to track the instance partition modification.
+        The metadata field type is
+        [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
+        The response field type is
+        [InstancePartition][google.spanner.admin.instance.v1.InstancePartition],
+        if successful.
+
+        Authorization requires ``spanner.instancePartitions.update``
+        permission on the resource
+        [name][google.spanner.admin.instance.v1.InstancePartition.name].
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import spanner_admin_instance_v1
+
+            async def sample_update_instance_partition():
+                # Create a client
+                client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
+
+                # Initialize request argument(s)
+                instance_partition = spanner_admin_instance_v1.InstancePartition()
+                instance_partition.node_count = 1070
+                instance_partition.name = "name_value"
+                instance_partition.config = "config_value"
+                instance_partition.display_name = "display_name_value"
+
+                request = spanner_admin_instance_v1.UpdateInstancePartitionRequest(
+                    instance_partition=instance_partition,
+                )
+
+                # Make the request
+                operation = client.update_instance_partition(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = (await operation).result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstancePartitionRequest, dict]]):
+                The request object. The request for
+                [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
+            instance_partition (:class:`google.cloud.spanner_admin_instance_v1.types.InstancePartition`):
+                Required. The instance partition to update, which must
+                always include the instance partition name. Otherwise,
+                only fields mentioned in
+                [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
+                need be included.
+
+                This corresponds to the ``instance_partition`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            field_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+                Required. A mask specifying which fields in
+                [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+                should be updated. The field mask must always be
+                specified; this prevents any future fields in
+                [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+                from being erased accidentally by clients that do not
+                know about them.
+
+                This corresponds to the ``field_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.InstancePartition` An isolated set of Cloud Spanner resources that databases can define
+                   placements on.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        # (Mixing a full `request` with flattened fields is ambiguous, so it
+        # is rejected outright rather than silently merged.)
+        flattened_params = [instance_partition, field_mask]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(
+            request, spanner_instance_admin.UpdateInstancePartitionRequest
+        ):
+            request = spanner_instance_admin.UpdateInstancePartitionRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if instance_partition is not None:
+            request.instance_partition = instance_partition
+        if field_mask is not None:
+            request.field_mask = field_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.update_instance_partition
+        ]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        # to_grpc_metadata builds the implicit routing header
+        # (x-goog-request-params) from the target resource's name so the
+        # backend can route the request.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("instance_partition.name", request.instance_partition.name),)
+            ),
+        )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future so callers can await /
+        # poll completion and receive a typed InstancePartition result.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            spanner_instance_admin.InstancePartition,
+            metadata_type=spanner_instance_admin.UpdateInstancePartitionMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+ async def list_instance_partition_operations(
+ self,
+ request: Optional[
+ Union[spanner_instance_admin.ListInstancePartitionOperationsRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListInstancePartitionOperationsAsyncPager:
+ r"""Lists instance partition long-running operations in the given
+ instance. An instance partition operation has a name of the form
+ ``projects//instances/