From f2e3505dbbf7879a687c4a0af35112626cb88e69 Mon Sep 17 00:00:00 2001 From: Ajumal Date: Tue, 11 Jun 2024 06:43:30 +0530 Subject: [PATCH] feat: introduce ManagedKafka (#7341) --- .repo-metadata-full.json | 8 + ManagedKafka/.OwlBot.yaml | 4 + ManagedKafka/.gitattributes | 7 + ManagedKafka/.github/pull_request_template.md | 24 + ManagedKafka/CONTRIBUTING.md | 10 + ManagedKafka/LICENSE | 202 +++ ManagedKafka/README.md | 45 + ManagedKafka/VERSION | 1 + ManagedKafka/composer.json | 30 + ManagedKafka/metadata/V1/ManagedKafka.php | 125 ++ ManagedKafka/metadata/V1/Resources.php | Bin 0 -> 3792 bytes ManagedKafka/owlbot.py | 56 + ManagedKafka/phpunit.xml.dist | 16 + .../V1/ManagedKafkaClient/create_cluster.php | 138 ++ .../V1/ManagedKafkaClient/create_topic.php | 95 + .../V1/ManagedKafkaClient/delete_cluster.php | 80 + .../delete_consumer_group.php | 75 + .../V1/ManagedKafkaClient/delete_topic.php | 70 + .../V1/ManagedKafkaClient/get_cluster.php | 71 + .../ManagedKafkaClient/get_consumer_group.php | 77 + .../V1/ManagedKafkaClient/get_location.php | 57 + .../V1/ManagedKafkaClient/get_topic.php | 73 + .../V1/ManagedKafkaClient/list_clusters.php | 77 + .../list_consumer_groups.php | 78 + .../V1/ManagedKafkaClient/list_locations.php | 62 + .../V1/ManagedKafkaClient/list_topics.php | 77 + .../V1/ManagedKafkaClient/update_cluster.php | 124 ++ .../update_consumer_group.php | 62 + .../V1/ManagedKafkaClient/update_topic.php | 82 + ManagedKafka/src/V1/AccessConfig.php | 75 + ManagedKafka/src/V1/CapacityConfig.php | 109 ++ .../src/V1/Client/ManagedKafkaClient.php | 802 +++++++++ ManagedKafka/src/V1/Cluster.php | 360 ++++ ManagedKafka/src/V1/Cluster/State.php | 69 + ManagedKafka/src/V1/ConsumerGroup.php | 117 ++ .../src/V1/ConsumerPartitionMetadata.php | 109 ++ ManagedKafka/src/V1/ConsumerTopicMetadata.php | 71 + ManagedKafka/src/V1/CreateClusterRequest.php | 272 +++ ManagedKafka/src/V1/CreateTopicRequest.php | 189 ++ ManagedKafka/src/V1/DeleteClusterRequest.php | 159 ++ .../src/V1/DeleteConsumerGroupRequest.php | 86 + ManagedKafka/src/V1/DeleteTopicRequest.php | 86 + ManagedKafka/src/V1/GcpConfig.php | 132 ++ ManagedKafka/src/V1/GetClusterRequest.php | 81 + .../src/V1/GetConsumerGroupRequest.php | 86 + ManagedKafka/src/V1/GetTopicRequest.php | 91 + ManagedKafka/src/V1/ListClustersRequest.php | 242 +++ ManagedKafka/src/V1/ListClustersResponse.php | 139 ++ .../src/V1/ListConsumerGroupsRequest.php | 179 ++ .../src/V1/ListConsumerGroupsResponse.php | 109 ++ ManagedKafka/src/V1/ListTopicsRequest.php | 174 ++ ManagedKafka/src/V1/ListTopicsResponse.php | 109 ++ ManagedKafka/src/V1/NetworkConfig.php | 96 ++ ManagedKafka/src/V1/OperationMetadata.php | 307 ++++ ManagedKafka/src/V1/RebalanceConfig.php | 71 + ManagedKafka/src/V1/RebalanceConfig/Mode.php | 63 + ManagedKafka/src/V1/Topic.php | 201 +++ ManagedKafka/src/V1/UpdateClusterRequest.php | 234 +++ .../src/V1/UpdateConsumerGroupRequest.php | 156 ++ ManagedKafka/src/V1/UpdateTopicRequest.php | 156 ++ ManagedKafka/src/V1/gapic_metadata.json | 98 ++ .../managed_kafka_client_config.json | 124 ++ .../managed_kafka_descriptor_config.php | 285 +++ .../managed_kafka_rest_client_config.php | 277 +++ .../Unit/V1/Client/ManagedKafkaClientTest.php | 1522 +++++++++++++++++ composer.json | 3 + 66 files changed, 9265 insertions(+) create mode 100644 ManagedKafka/.OwlBot.yaml create mode 100644 ManagedKafka/.gitattributes create mode 100644 ManagedKafka/.github/pull_request_template.md create mode 100644 ManagedKafka/CONTRIBUTING.md create mode 100644 ManagedKafka/LICENSE create 
mode 100644 ManagedKafka/README.md create mode 100644 ManagedKafka/VERSION create mode 100644 ManagedKafka/composer.json create mode 100644 ManagedKafka/metadata/V1/ManagedKafka.php create mode 100644 ManagedKafka/metadata/V1/Resources.php create mode 100644 ManagedKafka/owlbot.py create mode 100644 ManagedKafka/phpunit.xml.dist create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/create_cluster.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/create_topic.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/delete_cluster.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/delete_consumer_group.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/delete_topic.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/get_cluster.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/get_consumer_group.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/get_location.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/get_topic.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/list_clusters.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/list_consumer_groups.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/list_locations.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/list_topics.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/update_cluster.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/update_consumer_group.php create mode 100644 ManagedKafka/samples/V1/ManagedKafkaClient/update_topic.php create mode 100644 ManagedKafka/src/V1/AccessConfig.php create mode 100644 ManagedKafka/src/V1/CapacityConfig.php create mode 100644 ManagedKafka/src/V1/Client/ManagedKafkaClient.php create mode 100644 ManagedKafka/src/V1/Cluster.php create mode 100644 ManagedKafka/src/V1/Cluster/State.php create mode 100644 ManagedKafka/src/V1/ConsumerGroup.php create mode 100644 ManagedKafka/src/V1/ConsumerPartitionMetadata.php create mode 100644 ManagedKafka/src/V1/ConsumerTopicMetadata.php create mode 100644 ManagedKafka/src/V1/CreateClusterRequest.php create mode 100644 ManagedKafka/src/V1/CreateTopicRequest.php create mode 100644 ManagedKafka/src/V1/DeleteClusterRequest.php create mode 100644 ManagedKafka/src/V1/DeleteConsumerGroupRequest.php create mode 100644 ManagedKafka/src/V1/DeleteTopicRequest.php create mode 100644 ManagedKafka/src/V1/GcpConfig.php create mode 100644 ManagedKafka/src/V1/GetClusterRequest.php create mode 100644 ManagedKafka/src/V1/GetConsumerGroupRequest.php create mode 100644 ManagedKafka/src/V1/GetTopicRequest.php create mode 100644 ManagedKafka/src/V1/ListClustersRequest.php create mode 100644 ManagedKafka/src/V1/ListClustersResponse.php create mode 100644 ManagedKafka/src/V1/ListConsumerGroupsRequest.php create mode 100644 ManagedKafka/src/V1/ListConsumerGroupsResponse.php create mode 100644 ManagedKafka/src/V1/ListTopicsRequest.php create mode 100644 ManagedKafka/src/V1/ListTopicsResponse.php create mode 100644 ManagedKafka/src/V1/NetworkConfig.php create mode 100644 ManagedKafka/src/V1/OperationMetadata.php create mode 100644 ManagedKafka/src/V1/RebalanceConfig.php create mode 100644 ManagedKafka/src/V1/RebalanceConfig/Mode.php create mode 100644 ManagedKafka/src/V1/Topic.php create mode 100644 ManagedKafka/src/V1/UpdateClusterRequest.php create mode 100644 ManagedKafka/src/V1/UpdateConsumerGroupRequest.php create mode 100644 ManagedKafka/src/V1/UpdateTopicRequest.php 
create mode 100644 ManagedKafka/src/V1/gapic_metadata.json create mode 100644 ManagedKafka/src/V1/resources/managed_kafka_client_config.json create mode 100644 ManagedKafka/src/V1/resources/managed_kafka_descriptor_config.php create mode 100644 ManagedKafka/src/V1/resources/managed_kafka_rest_client_config.php create mode 100644 ManagedKafka/tests/Unit/V1/Client/ManagedKafkaClientTest.php diff --git a/.repo-metadata-full.json b/.repo-metadata-full.json index 49b3491111d7..e4ea8dd43ebf 100644 --- a/.repo-metadata-full.json +++ b/.repo-metadata-full.json @@ -847,6 +847,14 @@ "library_type": "GAPIC_AUTO", "api_shortname": "managedidentities" }, + "ManagedKafka": { + "language": "php", + "distribution_name": "google/cloud-managedkafka", + "release_level": "preview", + "client_documentation": "https://cloud.google.com/php/docs/reference/cloud-managedkafka/latest", + "library_type": "GAPIC_AUTO", + "api_shortname": "managedkafka" + }, "MapsFleetEngine": { "language": "php", "distribution_name": "google/maps-fleetengine", diff --git a/ManagedKafka/.OwlBot.yaml b/ManagedKafka/.OwlBot.yaml new file mode 100644 index 000000000000..291e9a54ebf5 --- /dev/null +++ b/ManagedKafka/.OwlBot.yaml @@ -0,0 +1,4 @@ +deep-copy-regex: + - source: /google/cloud/managedkafka/(v1)/.*-php/(.*) + dest: /owl-bot-staging/ManagedKafka/$1/$2 +api-name: ManagedKafka diff --git a/ManagedKafka/.gitattributes b/ManagedKafka/.gitattributes new file mode 100644 index 000000000000..4bf0fe6f415b --- /dev/null +++ b/ManagedKafka/.gitattributes @@ -0,0 +1,7 @@ +/*.xml.dist export-ignore +/.OwlBot.yaml export-ignore +/.github export-ignore +/owlbot.py export-ignore +/src/**/gapic_metadata.json export-ignore +/samples export-ignore +/tests export-ignore diff --git a/ManagedKafka/.github/pull_request_template.md b/ManagedKafka/.github/pull_request_template.md new file mode 100644 index 000000000000..9e726e4417ce --- /dev/null +++ b/ManagedKafka/.github/pull_request_template.md @@ -0,0 +1,24 @@ +**PLEASE READ THIS ENTIRE MESSAGE** + +Hello, and thank you for your contribution! Please note that this repository is +a read-only split of `googleapis/google-cloud-php`. As such, we are +unable to accept pull requests to this repository. + +We welcome your pull request and would be happy to consider it for inclusion in +our library if you follow these steps: + +* Clone the parent client library repository: + +```sh +$ git clone git@github.com:googleapis/google-cloud-php.git +``` + +* Move your changes into the correct location in that library. Library code +belongs in `ManagedKafka/src`, and tests in `ManagedKafka/tests`. + +* Push the changes in a new branch to a fork, and open a new pull request +[here](https://github.com/googleapis/google-cloud-php). + +Thanks again, and we look forward to seeing your proposed change! + +The Google Cloud PHP team diff --git a/ManagedKafka/CONTRIBUTING.md b/ManagedKafka/CONTRIBUTING.md new file mode 100644 index 000000000000..76ea811cacdb --- /dev/null +++ b/ManagedKafka/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. We accept +and review pull requests against the main +[Google Cloud PHP](https://github.com/googleapis/google-cloud-php) +repository, which contains all of our client libraries. You will also need to +sign a Contributor License Agreement. 
For more details about how to contribute, +see the +[CONTRIBUTING.md](https://github.com/googleapis/google-cloud-php/blob/main/CONTRIBUTING.md) +file in the main Google Cloud PHP repository. diff --git a/ManagedKafka/LICENSE b/ManagedKafka/LICENSE new file mode 100644 index 000000000000..8f71f43fee3f --- /dev/null +++ b/ManagedKafka/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/ManagedKafka/README.md b/ManagedKafka/README.md new file mode 100644 index 000000000000..51252e388f14 --- /dev/null +++ b/ManagedKafka/README.md @@ -0,0 +1,45 @@ +# Google Cloud Managed Kafka for PHP + +> Idiomatic PHP client for [Google Cloud Managed Kafka](https://cloud.google.com/managed-kafka). + +[![Latest Stable Version](https://poser.pugx.org/google/cloud-managedkafka/v/stable)](https://packagist.org/packages/google/cloud-managedkafka) [![Packagist](https://img.shields.io/packagist/dm/google/cloud-managedkafka.svg)](https://packagist.org/packages/google/cloud-managedkafka) + +* [API documentation](https://cloud.google.com/php/docs/reference/cloud-managedkafka/latest) + +**NOTE:** This repository is part of [Google Cloud PHP](https://github.com/googleapis/google-cloud-php). Any +support requests, bug reports, or development contributions should be directed to +that project. + +### Installation + +To begin, install the preferred dependency manager for PHP, [Composer](https://getcomposer.org/). + +Now, install this component: + +```sh +$ composer require google/cloud-managedkafka +``` + +> Browse the complete list of [Google Cloud APIs](https://cloud.google.com/php/docs/reference) +> for PHP + +This component supports both REST over HTTP/1.1 and gRPC. In order to take advantage of the benefits +offered by gRPC (such as streaming methods) please see our +[gRPC installation guide](https://cloud.google.com/php/grpc). + +### Authentication + +Please see our [Authentication guide](https://github.com/googleapis/google-cloud-php/blob/main/AUTHENTICATION.md) for more information +on authenticating your client. Once authenticated, you'll be ready to start making requests. + +### Sample + +See the [samples directory](https://github.com/googleapis/google-cloud-php-managedkafka/tree/main/samples) for a canonical list of samples. + +### Version + +This component is considered alpha. As such, it is still a work-in-progress and is more likely to get backwards-incompatible updates. + +### Next Steps + +1. Understand the [official documentation](https://cloud.google.com/managed-kafka). 
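As a minimal usage sketch (assuming the package is installed via Composer, Application Default Credentials are available, and placeholders such as `[PROJECT]` are replaced with real values), a client can be constructed and clusters listed following the same pattern as the generated `samples/V1/ManagedKafkaClient/list_clusters.php` sample shipped in this patch:

```php
<?php
// Sketch only: lists the Managed Kafka clusters in one project/location.
require 'vendor/autoload.php';

use Google\ApiCore\ApiException;
use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\ListClustersRequest;

// The client picks up Application Default Credentials from the environment.
$managedKafkaClient = new ManagedKafkaClient();

// Build the parent resource name: projects/{project}/locations/{location}.
$parent = ManagedKafkaClient::locationName('[PROJECT]', '[LOCATION]');
$request = (new ListClustersRequest())
    ->setParent($parent);

try {
    // listClusters() returns a paged response that can be iterated directly.
    foreach ($managedKafkaClient->listClusters($request) as $cluster) {
        printf('Cluster: %s' . PHP_EOL, $cluster->serializeToJsonString());
    }
} catch (ApiException $ex) {
    printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage());
}
```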
diff --git a/ManagedKafka/VERSION b/ManagedKafka/VERSION new file mode 100644 index 000000000000..77d6f4ca2371 --- /dev/null +++ b/ManagedKafka/VERSION @@ -0,0 +1 @@ +0.0.0 diff --git a/ManagedKafka/composer.json b/ManagedKafka/composer.json new file mode 100644 index 000000000000..23748456fbe1 --- /dev/null +++ b/ManagedKafka/composer.json @@ -0,0 +1,30 @@ +{ + "name": "google/cloud-managedkafka", + "description": "Google Cloud Managed Kafka Client for PHP", + "license": "Apache-2.0", + "minimum-stability": "stable", + "autoload": { + "psr-4": { + "Google\\Cloud\\ManagedKafka\\": "src", + "GPBMetadata\\Google\\Cloud\\Managedkafka\\": "metadata" + } + }, + "extra": { + "component": { + "id": "cloud-managedkafka", + "path": "ManagedKafka", + "target": "googleapis/google-cloud-php-managedkafka" + } + }, + "require": { + "php": "^8.0", + "google/gax": "^1.33.0" + }, + "require-dev": { + "phpunit/phpunit": "^9.0" + }, + "suggest": { + "ext-grpc": "Enables use of gRPC, a universal high-performance RPC framework created by Google.", + "ext-protobuf": "Provides a significant increase in throughput over the pure PHP protobuf implementation. See https://cloud.google.com/php/grpc for installation instructions." + } +} diff --git a/ManagedKafka/metadata/V1/ManagedKafka.php b/ManagedKafka/metadata/V1/ManagedKafka.php new file mode 100644 index 000000000000..10674c4cc346 --- /dev/null +++ b/ManagedKafka/metadata/V1/ManagedKafka.php @@ -0,0 +1,125 @@ +internalAddGeneratedFile( + ' +ñ+ +0google/cloud/managedkafka/v1/managed_kafka.protogoogle.cloud.managedkafka.v1google/api/client.protogoogle/api/field_behavior.protogoogle/api/field_info.protogoogle/api/resource.proto,google/cloud/managedkafka/v1/resources.proto#google/longrunning/operations.protogoogle/protobuf/empty.proto google/protobuf/field_mask.proto"¯ +ListClustersRequest; +parent ( B+àAúA%#managedkafka.googleapis.com/Cluster + page_size (BàA + +page_token ( BàA +filter ( BàA +order_by ( BàA"} +ListClustersResponse7 +clusters ( 2%.google.cloud.managedkafka.v1.Cluster +next_page_token (  + unreachable ( "N +GetClusterRequest9 +name ( B+àAúA% +#managedkafka.googleapis.com/Cluster"Ê +CreateClusterRequest; +parent ( B+àAúA%#managedkafka.googleapis.com/Cluster + +cluster_id ( BàA; +cluster ( 2%.google.cloud.managedkafka.v1.ClusterBàA + +request_id ( B àAâŒÏ×"ª +UpdateClusterRequest4 + update_mask ( 2.google.protobuf.FieldMaskBàA; +cluster ( 2%.google.cloud.managedkafka.v1.ClusterBàA + +request_id ( B àAâŒÏ×"r +DeleteClusterRequest9 +name ( B+àAúA% +#managedkafka.googleapis.com/Cluster + +request_id ( B àAâŒÏ×" +ListTopicsRequest9 +parent ( B)àAúA#!managedkafka.googleapis.com/Topic + page_size (BàA + +page_token ( BàA"b +ListTopicsResponse3 +topics ( 2#.google.cloud.managedkafka.v1.Topic +next_page_token ( "J +GetTopicRequest7 +name ( B)àAúA# +!managedkafka.googleapis.com/Topic"Ÿ +CreateTopicRequest9 +parent ( B)àAúA#!managedkafka.googleapis.com/Topic +topic_id ( BàA7 +topic ( 2#.google.cloud.managedkafka.v1.TopicBàA"ƒ +UpdateTopicRequest4 + update_mask ( 2.google.protobuf.FieldMaskBàA7 +topic ( 2#.google.cloud.managedkafka.v1.TopicBàA"M +DeleteTopicRequest7 +name ( B)àAúA# +!managedkafka.googleapis.com/Topic" +ListConsumerGroupsRequestA +parent ( B1àAúA+)managedkafka.googleapis.com/ConsumerGroup + page_size (BàA + +page_token ( BàA"{ +ListConsumerGroupsResponseD +consumer_groups ( 2+.google.cloud.managedkafka.v1.ConsumerGroup +next_page_token ( "Z +GetConsumerGroupRequest? 
+name ( B1àAúA+ +)managedkafka.googleapis.com/ConsumerGroup"œ +UpdateConsumerGroupRequest4 + update_mask ( 2.google.protobuf.FieldMaskBàAH +consumer_group ( 2+.google.cloud.managedkafka.v1.ConsumerGroupBàA"] +DeleteConsumerGroupRequest? +name ( B1àAúA+ +)managedkafka.googleapis.com/ConsumerGroup2² + ManagedKafka´ + ListClusters1.google.cloud.managedkafka.v1.ListClustersRequest2.google.cloud.managedkafka.v1.ListClustersResponse"=ÚAparent‚Óä“.,/v1/{parent=projects/*/locations/*}/clusters¡ + +GetCluster/.google.cloud.managedkafka.v1.GetClusterRequest%.google.cloud.managedkafka.v1.Cluster";ÚAname‚Óä“.,/v1/{name=projects/*/locations/*/clusters/*}Ü + CreateCluster2.google.cloud.managedkafka.v1.CreateClusterRequest.google.longrunning.Operation"xÊA +ClusterOperationMetadataÚAparent,cluster,cluster_id‚Óä“7",/v1/{parent=projects/*/locations/*}/clusters:clusterÞ + UpdateCluster2.google.cloud.managedkafka.v1.UpdateClusterRequest.google.longrunning.Operation"zÊA +ClusterOperationMetadataÚAcluster,update_mask‚Óä“?24/v1/{cluster.name=projects/*/locations/*/clusters/*}:clusterÌ + DeleteCluster2.google.cloud.managedkafka.v1.DeleteClusterRequest.google.longrunning.Operation"hÊA* +google.protobuf.EmptyOperationMetadataÚAname‚Óä“.*,/v1/{name=projects/*/locations/*/clusters/*}· + +ListTopics/.google.cloud.managedkafka.v1.ListTopicsRequest0.google.cloud.managedkafka.v1.ListTopicsResponse"FÚAparent‚Óä“75/v1/{parent=projects/*/locations/*/clusters/*}/topics¤ +GetTopic-.google.cloud.managedkafka.v1.GetTopicRequest#.google.cloud.managedkafka.v1.Topic"DÚAname‚Óä“75/v1/{name=projects/*/locations/*/clusters/*/topics/*} + CreateTopic0.google.cloud.managedkafka.v1.CreateTopicRequest#.google.cloud.managedkafka.v1.Topic"\\ÚAparent,topic,topic_id‚Óä“>"5/v1/{parent=projects/*/locations/*/clusters/*}/topics:topicÄ + UpdateTopic0.google.cloud.managedkafka.v1.UpdateTopicRequest#.google.cloud.managedkafka.v1.Topic"^ÚAtopic,update_mask‚Óä“D2;/v1/{topic.name=projects/*/locations/*/clusters/*/topics/*}:topic + DeleteTopic0.google.cloud.managedkafka.v1.DeleteTopicRequest.google.protobuf.Empty"DÚAname‚Óä“7*5/v1/{name=projects/*/locations/*/clusters/*/topics/*}× +ListConsumerGroups7.google.cloud.managedkafka.v1.ListConsumerGroupsRequest8.google.cloud.managedkafka.v1.ListConsumerGroupsResponse"NÚAparent‚Óä“?=/v1/{parent=projects/*/locations/*/clusters/*}/consumerGroupsÄ +GetConsumerGroup5.google.cloud.managedkafka.v1.GetConsumerGroupRequest+.google.cloud.managedkafka.v1.ConsumerGroup"LÚAname‚Óä“?=/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}€ +UpdateConsumerGroup8.google.cloud.managedkafka.v1.UpdateConsumerGroupRequest+.google.cloud.managedkafka.v1.ConsumerGroup"ÚAconsumer_group,update_mask‚Óä“^2L/v1/{consumer_group.name=projects/*/locations/*/clusters/*/consumerGroups/*}:consumer_groupµ +DeleteConsumerGroup8.google.cloud.managedkafka.v1.DeleteConsumerGroupRequest.google.protobuf.Empty"LÚAname‚Óä“?*=/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}OÊAmanagedkafka.googleapis.comÒA.https://www.googleapis.com/auth/cloud-platformBÝ + com.google.cloud.managedkafka.v1BManagedKafkaProtoPZDcloud.google.com/go/managedkafka/apiv1/managedkafkapb;managedkafkapbªGoogle.Cloud.ManagedKafka.V1ÊGoogle\\Cloud\\ManagedKafka\\V1êGoogle::Cloud::ManagedKafka::V1bproto3' + , true); + + static::$is_initialized = true; + } +} + diff --git a/ManagedKafka/metadata/V1/Resources.php b/ManagedKafka/metadata/V1/Resources.php new file mode 100644 index 0000000000000000000000000000000000000000..f98ecbb0893434fb2ce3aaad9375d2157fadc0d0 GIT 
binary patch literal 3792 zcmbtXOK;mo5RN3vkC7iT+cb(Bw2T}ej_XjhQlJG>H=rI?)!4FND*+k^ASi03Fs4YB zTsl<)0qI}pk0^@#l-`S8(`$}7_SD%WDN~LsJFPAvmou|F-+VJW^YsJgsRQd`jabCh zDCvrwp-7(+(Q$2R>$WL&{9cc^qHYfy!z8XGij@PganKU0m3r$Ys(QBX>O>a%w%s>L zK{su`TNr4T)+gO#t#_;yPPPj!3B){!8wRkn0r4D7Ct|Hx+9y=&YE*02xQ%w1*=X;_ zY_uP5-v`i5&GW>=$QHZ+5r2-~F%4bxs7CScmf@*}Wl%#iji1n;9kHjG9zpGQWqZD* zQ^U4Irf^{CWaEw)*krP|92@=L5u;Nz86*ftyKYh9TAJCe5KngvhuUrvMP<29=om=0 zvT-NemYFkp;zn>rS$^k`J0f*`65m}ET|#}=y3fze42;H4?Q<|w9HYH!5VKn%Pqh=n zc4hh0t5YNYrW%2Id95)VEdxVIwSjZKE{E~%H%17W((R5h+C@ffjJ;w$-iEi=VC#LT zB-{w3B~CmkPCV&kd+mB?OLL4d2?GwUeHwn^jKsJWXyiyBkbj$oxw7ed7;T{h(AOPR zx2>Mh&n<|FjZCq9Tbj1FRMVX@moKGWE6H!K2&*u|c%Mt4`T|>=7ruau?h?#AmF=EO za>Hwp-RNl27-OcDVp_g~853%yE~m8%v!>P|rk9%$Q+WREXq$7PlS0>}J+4!=s5@l4 zCxq`{Mb|KQ4LXbfN%IqIU0^U&ih&~RLDnT5&D1daB4hL1*xl*I4kN8oYYE{2q*;oQ z9H5Ei;%C!ML=GM?6%Op$UC6}T6*7=ICc_|J!V;uUG}8})v>+$HO5}GT&6S0#uzJ)| zT2=K?PFvDJi4S48{>?w_M6=DAb z)|5xB*qVCKP>;&W9;!C8$^0!?Y7qL|c8>!p2_L|$=XWeZxkEfrllkvqQPFkcd7)4p zR#1Xzg}`};49ltuNH?(+hcW+40v5)#LfC~Rjc*jCLy+B@)8UGrEeOsI%pDIrmGPHV zm4(t4=I(1H{kO6XH+jb6=g{aW&DbiMyk%`Gqy^g} z9qgP3!6gpf$()sHPt);+%rQ68rCH<+9s<3b(|K)-iqYrO{o_ZG0_?rhTC^s?VtnzU zjFn#|-Y6ruIW5%!uj~Ik(F&B|hMC9zN1L<3doj)b^M57aDxTT%2gGeL$*BDi+HHU_ zt`VQg(}6kHlGKc%6UXWqtejbo!^L6}D{z_OmzUdkY%+3#SH>D#k52ZE0822t*YhyJ z*i9G~ldCX42>Za(yi4SNPQg-Sspi_ga~>b}AjB9oSq@(E=-i&fV-Uxw1DV)b9HRy7 z112!KcR7qE!p!iIUm*{FO~9RLM4WW{$^Rgv9@33F%&vs+UF|bON1MyhLLg9Fnt0E@ zOv37c!v+M*_bAL~TNf6}O!30jWgF&+)qSsS2kAmJ7n^HdLwG$-F&3NxmgsNXO$}@_gGIc`FhQ9}9g5|@H- zbXgAKt#fEteWV&bsV+WZM-tsFjDY-^T~4FUEE=Wuz1!nKU@I7gVzUzqtD&DWH*v{e Jx*5R;;2)}G%X9z$ literal 0 HcmV?d00001 diff --git a/ManagedKafka/owlbot.py b/ManagedKafka/owlbot.py new file mode 100644 index 000000000000..b4940284bb67 --- /dev/null +++ b/ManagedKafka/owlbot.py @@ -0,0 +1,56 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This script is used to synthesize generated parts of this library.""" + +import logging +from pathlib import Path +import subprocess + +import synthtool as s +from synthtool.languages import php +from synthtool import _tracked_paths + +logging.basicConfig(level=logging.DEBUG) + +src = Path(f"../{php.STAGING_DIR}/ManagedKafka").resolve() +dest = Path().resolve() + +# Added so that we can pass copy_excludes in the owlbot_main() call +_tracked_paths.add(src) + +php.owlbot_main(src=src, dest=dest) + +# remove class_alias code +s.replace( + "src/V*/**/*.php", + r"^// Adding a class alias for backwards compatibility with the previous class name.$" + + "\n" + + r"^class_alias\(.*\);$" + + "\n", + '') + +# format generated clients +subprocess.run([ + 'npm', + 'exec', + '--yes', + '--package=@prettier/plugin-php@^0.16', + '--', + 'prettier', + '**/Client/*', + '--write', + '--parser=php', + '--single-quote', + '--print-width=120']) diff --git a/ManagedKafka/phpunit.xml.dist b/ManagedKafka/phpunit.xml.dist new file mode 100644 index 000000000000..5924aa644c26 --- /dev/null +++ b/ManagedKafka/phpunit.xml.dist @@ -0,0 +1,16 @@ + + + + + src + + + src/V[!a-zA-Z]* + + + + + tests/Unit + + + diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/create_cluster.php b/ManagedKafka/samples/V1/ManagedKafkaClient/create_cluster.php new file mode 100644 index 000000000000..2a7f3f1cc01a --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/create_cluster.php @@ -0,0 +1,138 @@ +setSubnet($clusterGcpConfigAccessConfigNetworkConfigsSubnet); + $clusterGcpConfigAccessConfigNetworkConfigs = [$networkConfig,]; + $clusterGcpConfigAccessConfig = (new AccessConfig()) + ->setNetworkConfigs($clusterGcpConfigAccessConfigNetworkConfigs); + $clusterGcpConfig = (new GcpConfig()) + ->setAccessConfig($clusterGcpConfigAccessConfig); + $clusterCapacityConfig = (new CapacityConfig()) + ->setVcpuCount($clusterCapacityConfigVcpuCount) + ->setMemoryBytes($clusterCapacityConfigMemoryBytes); + $cluster = (new Cluster()) + ->setGcpConfig($clusterGcpConfig) + ->setCapacityConfig($clusterCapacityConfig); + $request = (new CreateClusterRequest()) + ->setParent($formattedParent) + ->setClusterId($clusterId) + ->setCluster($cluster); + + // Call the API and handle any network failures. + try { + /** @var OperationResponse $response */ + $response = $managedKafkaClient->createCluster($request); + $response->pollUntilComplete(); + + if ($response->operationSucceeded()) { + /** @var Cluster $result */ + $result = $response->getResult(); + printf('Operation successful with response data: %s' . PHP_EOL, $result->serializeToJsonString()); + } else { + /** @var Status $error */ + $error = $response->getError(); + printf('Operation failed with error data: %s' . PHP_EOL, $error->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. 
+ */ +function callSample(): void +{ + $formattedParent = ManagedKafkaClient::locationName('[PROJECT]', '[LOCATION]'); + $clusterId = '[CLUSTER_ID]'; + $clusterGcpConfigAccessConfigNetworkConfigsSubnet = '[SUBNET]'; + $clusterCapacityConfigVcpuCount = 0; + $clusterCapacityConfigMemoryBytes = 0; + + create_cluster_sample( + $formattedParent, + $clusterId, + $clusterGcpConfigAccessConfigNetworkConfigsSubnet, + $clusterCapacityConfigVcpuCount, + $clusterCapacityConfigMemoryBytes + ); +} +// [END managedkafka_v1_generated_ManagedKafka_CreateCluster_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/create_topic.php b/ManagedKafka/samples/V1/ManagedKafkaClient/create_topic.php new file mode 100644 index 000000000000..0cc45662eb29 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/create_topic.php @@ -0,0 +1,95 @@ +setPartitionCount($topicPartitionCount) + ->setReplicationFactor($topicReplicationFactor); + $request = (new CreateTopicRequest()) + ->setParent($formattedParent) + ->setTopicId($topicId) + ->setTopic($topic); + + // Call the API and handle any network failures. + try { + /** @var Topic $response */ + $response = $managedKafkaClient->createTopic($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedParent = ManagedKafkaClient::clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $topicId = '[TOPIC_ID]'; + $topicPartitionCount = 0; + $topicReplicationFactor = 0; + + create_topic_sample($formattedParent, $topicId, $topicPartitionCount, $topicReplicationFactor); +} +// [END managedkafka_v1_generated_ManagedKafka_CreateTopic_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/delete_cluster.php b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_cluster.php new file mode 100644 index 000000000000..f028249f7cd8 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_cluster.php @@ -0,0 +1,80 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + /** @var OperationResponse $response */ + $response = $managedKafkaClient->deleteCluster($request); + $response->pollUntilComplete(); + + if ($response->operationSucceeded()) { + printf('Operation completed successfully.' . PHP_EOL); + } else { + /** @var Status $error */ + $error = $response->getError(); + printf('Operation failed with error data: %s' . PHP_EOL, $error->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. 
+ */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + + delete_cluster_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_DeleteCluster_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/delete_consumer_group.php b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_consumer_group.php new file mode 100644 index 000000000000..d953c6d7c30e --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_consumer_group.php @@ -0,0 +1,75 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + $managedKafkaClient->deleteConsumerGroup($request); + printf('Call completed successfully.' . PHP_EOL); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::consumerGroupName( + '[PROJECT]', + '[LOCATION]', + '[CLUSTER]', + '[CONSUMER_GROUP]' + ); + + delete_consumer_group_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/delete_topic.php b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_topic.php new file mode 100644 index 000000000000..f2e8ee9e3cbf --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/delete_topic.php @@ -0,0 +1,70 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + $managedKafkaClient->deleteTopic($request); + printf('Call completed successfully.' . PHP_EOL); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + + delete_topic_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_DeleteTopic_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/get_cluster.php b/ManagedKafka/samples/V1/ManagedKafkaClient/get_cluster.php new file mode 100644 index 000000000000..5c3fc319d4f9 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/get_cluster.php @@ -0,0 +1,71 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + /** @var Cluster $response */ + $response = $managedKafkaClient->getCluster($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. 
+ * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + + get_cluster_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_GetCluster_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/get_consumer_group.php b/ManagedKafka/samples/V1/ManagedKafkaClient/get_consumer_group.php new file mode 100644 index 000000000000..ebbfda931847 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/get_consumer_group.php @@ -0,0 +1,77 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + /** @var ConsumerGroup $response */ + $response = $managedKafkaClient->getConsumerGroup($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::consumerGroupName( + '[PROJECT]', + '[LOCATION]', + '[CLUSTER]', + '[CONSUMER_GROUP]' + ); + + get_consumer_group_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/get_location.php b/ManagedKafka/samples/V1/ManagedKafkaClient/get_location.php new file mode 100644 index 000000000000..54ef42f76ae7 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/get_location.php @@ -0,0 +1,57 @@ +getLocation($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} +// [END managedkafka_v1_generated_ManagedKafka_GetLocation_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/get_topic.php b/ManagedKafka/samples/V1/ManagedKafkaClient/get_topic.php new file mode 100644 index 000000000000..48c3f4d04e00 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/get_topic.php @@ -0,0 +1,73 @@ +setName($formattedName); + + // Call the API and handle any network failures. + try { + /** @var Topic $response */ + $response = $managedKafkaClient->getTopic($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. 
+ * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedName = ManagedKafkaClient::topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + + get_topic_sample($formattedName); +} +// [END managedkafka_v1_generated_ManagedKafka_GetTopic_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/list_clusters.php b/ManagedKafka/samples/V1/ManagedKafkaClient/list_clusters.php new file mode 100644 index 000000000000..0a4b0a051a9f --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/list_clusters.php @@ -0,0 +1,77 @@ +setParent($formattedParent); + + // Call the API and handle any network failures. + try { + /** @var PagedListResponse $response */ + $response = $managedKafkaClient->listClusters($request); + + /** @var Cluster $element */ + foreach ($response as $element) { + printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedParent = ManagedKafkaClient::locationName('[PROJECT]', '[LOCATION]'); + + list_clusters_sample($formattedParent); +} +// [END managedkafka_v1_generated_ManagedKafka_ListClusters_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/list_consumer_groups.php b/ManagedKafka/samples/V1/ManagedKafkaClient/list_consumer_groups.php new file mode 100644 index 000000000000..7ab0b78142b1 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/list_consumer_groups.php @@ -0,0 +1,78 @@ +setParent($formattedParent); + + // Call the API and handle any network failures. + try { + /** @var PagedListResponse $response */ + $response = $managedKafkaClient->listConsumerGroups($request); + + /** @var ConsumerGroup $element */ + foreach ($response as $element) { + printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. 
+ */ +function callSample(): void +{ + $formattedParent = ManagedKafkaClient::clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + + list_consumer_groups_sample($formattedParent); +} +// [END managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/list_locations.php b/ManagedKafka/samples/V1/ManagedKafkaClient/list_locations.php new file mode 100644 index 000000000000..c7aec8b4850e --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/list_locations.php @@ -0,0 +1,62 @@ +listLocations($request); + + /** @var Location $element */ + foreach ($response as $element) { + printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} +// [END managedkafka_v1_generated_ManagedKafka_ListLocations_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/list_topics.php b/ManagedKafka/samples/V1/ManagedKafkaClient/list_topics.php new file mode 100644 index 000000000000..90860be6b9db --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/list_topics.php @@ -0,0 +1,77 @@ +setParent($formattedParent); + + // Call the API and handle any network failures. + try { + /** @var PagedListResponse $response */ + $response = $managedKafkaClient->listTopics($request); + + /** @var Topic $element */ + foreach ($response as $element) { + printf('Element data: %s' . PHP_EOL, $element->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $formattedParent = ManagedKafkaClient::clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + + list_topics_sample($formattedParent); +} +// [END managedkafka_v1_generated_ManagedKafka_ListTopics_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/update_cluster.php b/ManagedKafka/samples/V1/ManagedKafkaClient/update_cluster.php new file mode 100644 index 000000000000..61e3c38aa419 --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/update_cluster.php @@ -0,0 +1,124 @@ +setSubnet($clusterGcpConfigAccessConfigNetworkConfigsSubnet); + $clusterGcpConfigAccessConfigNetworkConfigs = [$networkConfig,]; + $clusterGcpConfigAccessConfig = (new AccessConfig()) + ->setNetworkConfigs($clusterGcpConfigAccessConfigNetworkConfigs); + $clusterGcpConfig = (new GcpConfig()) + ->setAccessConfig($clusterGcpConfigAccessConfig); + $clusterCapacityConfig = (new CapacityConfig()) + ->setVcpuCount($clusterCapacityConfigVcpuCount) + ->setMemoryBytes($clusterCapacityConfigMemoryBytes); + $cluster = (new Cluster()) + ->setGcpConfig($clusterGcpConfig) + ->setCapacityConfig($clusterCapacityConfig); + $request = (new UpdateClusterRequest()) + ->setUpdateMask($updateMask) + ->setCluster($cluster); + + // Call the API and handle any network failures. 
+ try { + /** @var OperationResponse $response */ + $response = $managedKafkaClient->updateCluster($request); + $response->pollUntilComplete(); + + if ($response->operationSucceeded()) { + /** @var Cluster $result */ + $result = $response->getResult(); + printf('Operation successful with response data: %s' . PHP_EOL, $result->serializeToJsonString()); + } else { + /** @var Status $error */ + $error = $response->getError(); + printf('Operation failed with error data: %s' . PHP_EOL, $error->serializeToJsonString()); + } + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. + */ +function callSample(): void +{ + $clusterGcpConfigAccessConfigNetworkConfigsSubnet = '[SUBNET]'; + $clusterCapacityConfigVcpuCount = 0; + $clusterCapacityConfigMemoryBytes = 0; + + update_cluster_sample( + $clusterGcpConfigAccessConfigNetworkConfigsSubnet, + $clusterCapacityConfigVcpuCount, + $clusterCapacityConfigMemoryBytes + ); +} +// [END managedkafka_v1_generated_ManagedKafka_UpdateCluster_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/update_consumer_group.php b/ManagedKafka/samples/V1/ManagedKafkaClient/update_consumer_group.php new file mode 100644 index 000000000000..91b8e7b0091f --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/update_consumer_group.php @@ -0,0 +1,62 @@ +setUpdateMask($updateMask) + ->setConsumerGroup($consumerGroup); + + // Call the API and handle any network failures. + try { + /** @var ConsumerGroup $response */ + $response = $managedKafkaClient->updateConsumerGroup($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} +// [END managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_sync] diff --git a/ManagedKafka/samples/V1/ManagedKafkaClient/update_topic.php b/ManagedKafka/samples/V1/ManagedKafkaClient/update_topic.php new file mode 100644 index 000000000000..cc41bac1056f --- /dev/null +++ b/ManagedKafka/samples/V1/ManagedKafkaClient/update_topic.php @@ -0,0 +1,82 @@ +setPartitionCount($topicPartitionCount) + ->setReplicationFactor($topicReplicationFactor); + $request = (new UpdateTopicRequest()) + ->setUpdateMask($updateMask) + ->setTopic($topic); + + // Call the API and handle any network failures. + try { + /** @var Topic $response */ + $response = $managedKafkaClient->updateTopic($request); + printf('Response data: %s' . PHP_EOL, $response->serializeToJsonString()); + } catch (ApiException $ex) { + printf('Call failed with message: %s' . PHP_EOL, $ex->getMessage()); + } +} + +/** + * Helper to execute the sample. + * + * This sample has been automatically generated and should be regarded as a code + * template only. It will require modifications to work: + * - It may require correct/in-range values for request initialization. + * - It may require specifying regional endpoints when creating the service client, + * please see the apiEndpoint client configuration option for more details. 
+ */ +function callSample(): void +{ + $topicPartitionCount = 0; + $topicReplicationFactor = 0; + + update_topic_sample($topicPartitionCount, $topicReplicationFactor); +} +// [END managedkafka_v1_generated_ManagedKafka_UpdateTopic_sync] diff --git a/ManagedKafka/src/V1/AccessConfig.php b/ManagedKafka/src/V1/AccessConfig.php new file mode 100644 index 000000000000..7b0b55d42851 --- /dev/null +++ b/ManagedKafka/src/V1/AccessConfig.php @@ -0,0 +1,75 @@ +google.cloud.managedkafka.v1.AccessConfig + */ +class AccessConfig extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Virtual Private Cloud (VPC) networks that must be granted direct + * access to the Kafka cluster. Minimum of 1 network is required. Maximum 10 + * networks can be specified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.NetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + private $network_configs; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type array<\Google\Cloud\ManagedKafka\V1\NetworkConfig>|\Google\Protobuf\Internal\RepeatedField $network_configs + * Required. Virtual Private Cloud (VPC) networks that must be granted direct + * access to the Kafka cluster. Minimum of 1 network is required. Maximum 10 + * networks can be specified. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. Virtual Private Cloud (VPC) networks that must be granted direct + * access to the Kafka cluster. Minimum of 1 network is required. Maximum 10 + * networks can be specified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.NetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Protobuf\Internal\RepeatedField + */ + public function getNetworkConfigs() + { + return $this->network_configs; + } + + /** + * Required. Virtual Private Cloud (VPC) networks that must be granted direct + * access to the Kafka cluster. Minimum of 1 network is required. Maximum 10 + * networks can be specified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.NetworkConfig network_configs = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param array<\Google\Cloud\ManagedKafka\V1\NetworkConfig>|\Google\Protobuf\Internal\RepeatedField $var + * @return $this + */ + public function setNetworkConfigs($var) + { + $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\NetworkConfig::class); + $this->network_configs = $arr; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/CapacityConfig.php b/ManagedKafka/src/V1/CapacityConfig.php new file mode 100644 index 000000000000..491c262bce33 --- /dev/null +++ b/ManagedKafka/src/V1/CapacityConfig.php @@ -0,0 +1,109 @@ +google.cloud.managedkafka.v1.CapacityConfig + */ +class CapacityConfig extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The number of vCPUs to provision for the cluster. Minimum: 3. + * + * Generated from protobuf field int64 vcpu_count = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $vcpu_count = 0; + /** + * Required. The memory to provision for the cluster in bytes. + * The CPU:memory ratio (vCPU:GiB) must be between 1:1 and 1:8. + * Minimum: 3221225472 (3 GiB). 
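+ * For example, with the 3-vCPU minimum, memory_bytes may range from
+ * 3221225472 (3 GiB, a 1:1 ratio) to 25769803776 (24 GiB, a 1:8 ratio).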
+ * + * Generated from protobuf field int64 memory_bytes = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $memory_bytes = 0; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type int|string $vcpu_count + * Required. The number of vCPUs to provision for the cluster. Minimum: 3. + * @type int|string $memory_bytes + * Required. The memory to provision for the cluster in bytes. + * The CPU:memory ratio (vCPU:GiB) must be between 1:1 and 1:8. + * Minimum: 3221225472 (3 GiB). + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. The number of vCPUs to provision for the cluster. Minimum: 3. + * + * Generated from protobuf field int64 vcpu_count = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return int|string + */ + public function getVcpuCount() + { + return $this->vcpu_count; + } + + /** + * Required. The number of vCPUs to provision for the cluster. Minimum: 3. + * + * Generated from protobuf field int64 vcpu_count = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param int|string $var + * @return $this + */ + public function setVcpuCount($var) + { + GPBUtil::checkInt64($var); + $this->vcpu_count = $var; + + return $this; + } + + /** + * Required. The memory to provision for the cluster in bytes. + * The CPU:memory ratio (vCPU:GiB) must be between 1:1 and 1:8. + * Minimum: 3221225472 (3 GiB). + * + * Generated from protobuf field int64 memory_bytes = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return int|string + */ + public function getMemoryBytes() + { + return $this->memory_bytes; + } + + /** + * Required. The memory to provision for the cluster in bytes. + * The CPU:memory ratio (vCPU:GiB) must be between 1:1 and 1:8. + * Minimum: 3221225472 (3 GiB). + * + * Generated from protobuf field int64 memory_bytes = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param int|string $var + * @return $this + */ + public function setMemoryBytes($var) + { + GPBUtil::checkInt64($var); + $this->memory_bytes = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/Client/ManagedKafkaClient.php b/ManagedKafka/src/V1/Client/ManagedKafkaClient.php new file mode 100644 index 000000000000..7cbb343f0498 --- /dev/null +++ b/ManagedKafka/src/V1/Client/ManagedKafkaClient.php @@ -0,0 +1,802 @@ + self::SERVICE_NAME, + 'apiEndpoint' => self::SERVICE_ADDRESS . ':' . self::DEFAULT_SERVICE_PORT, + 'clientConfig' => __DIR__ . '/../resources/managed_kafka_client_config.json', + 'descriptorsConfigPath' => __DIR__ . '/../resources/managed_kafka_descriptor_config.php', + 'gcpApiConfigPath' => __DIR__ . '/../resources/managed_kafka_grpc_config.json', + 'credentialsConfig' => [ + 'defaultScopes' => self::$serviceScopes, + ], + 'transportConfig' => [ + 'rest' => [ + 'restClientConfigPath' => __DIR__ . '/../resources/managed_kafka_rest_client_config.php', + ], + ], + ]; + } + + /** + * Return an OperationsClient object with the same endpoint as $this. + * + * @return OperationsClient + */ + public function getOperationsClient() + { + return $this->operationsClient; + } + + /** + * Resume an existing long running operation that was previously started by a long + * running API method. If $methodName is not provided, or does not match a long + * running API method, then the operation can still be resumed, but the + * OperationResponse object will not deserialize the final response. 
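A minimal sketch of a CapacityConfig that satisfies the constraints documented above (at least 3 vCPUs, a vCPU:GiB ratio between 1:1 and 1:8); the 9 GiB figure is only an illustrative choice:

use Google\Cloud\ManagedKafka\V1\CapacityConfig;

// 3 vCPUs (the documented minimum) with 9 GiB of memory gives a 1:3
// vCPU:GiB ratio, which falls inside the allowed 1:1 .. 1:8 range.
$capacityConfig = (new CapacityConfig())
    ->setVcpuCount(3)
    ->setMemoryBytes(3 * 3221225472); // 3 x 3 GiB = 9 GiB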
+ * + * @param string $operationName The name of the long running operation + * @param string $methodName The name of the method used to start the operation + * + * @return OperationResponse + */ + public function resumeOperation($operationName, $methodName = null) + { + $options = isset($this->descriptors[$methodName]['longRunning']) + ? $this->descriptors[$methodName]['longRunning'] + : []; + $operation = new OperationResponse($operationName, $this->getOperationsClient(), $options); + $operation->reload(); + return $operation; + } + + /** + * Create the default operation client for the service. + * + * @param array $options ClientOptions for the client. + * + * @return OperationsClient + */ + private function createOperationsClient(array $options) + { + // Unset client-specific configuration options + unset($options['serviceName'], $options['clientConfig'], $options['descriptorsConfigPath']); + + if (isset($options['operationsClient'])) { + return $options['operationsClient']; + } + + return new OperationsClient($options); + } + + /** + * Formats a string containing the fully-qualified path to represent a cluster + * resource. + * + * @param string $project + * @param string $location + * @param string $cluster + * + * @return string The formatted cluster resource. + */ + public static function clusterName(string $project, string $location, string $cluster): string + { + return self::getPathTemplate('cluster')->render([ + 'project' => $project, + 'location' => $location, + 'cluster' => $cluster, + ]); + } + + /** + * Formats a string containing the fully-qualified path to represent a + * consumer_group resource. + * + * @param string $project + * @param string $location + * @param string $cluster + * @param string $consumerGroup + * + * @return string The formatted consumer_group resource. + */ + public static function consumerGroupName( + string $project, + string $location, + string $cluster, + string $consumerGroup + ): string { + return self::getPathTemplate('consumerGroup')->render([ + 'project' => $project, + 'location' => $location, + 'cluster' => $cluster, + 'consumer_group' => $consumerGroup, + ]); + } + + /** + * Formats a string containing the fully-qualified path to represent a crypto_key + * resource. + * + * @param string $project + * @param string $location + * @param string $keyRing + * @param string $cryptoKey + * + * @return string The formatted crypto_key resource. + */ + public static function cryptoKeyName(string $project, string $location, string $keyRing, string $cryptoKey): string + { + return self::getPathTemplate('cryptoKey')->render([ + 'project' => $project, + 'location' => $location, + 'key_ring' => $keyRing, + 'crypto_key' => $cryptoKey, + ]); + } + + /** + * Formats a string containing the fully-qualified path to represent a location + * resource. + * + * @param string $project + * @param string $location + * + * @return string The formatted location resource. + */ + public static function locationName(string $project, string $location): string + { + return self::getPathTemplate('location')->render([ + 'project' => $project, + 'location' => $location, + ]); + } + + /** + * Formats a string containing the fully-qualified path to represent a topic + * resource. + * + * @param string $project + * @param string $location + * @param string $cluster + * @param string $topic + * + * @return string The formatted topic resource. 
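The resource-name formatters above only render path templates client-side; a quick sketch with placeholder project, location, cluster, topic and group IDs:

use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;

// Placeholder identifiers, for illustration only.
$project = 'my-project';
$location = 'us-central1';

// projects/my-project/locations/us-central1/clusters/my-cluster
$clusterName = ManagedKafkaClient::clusterName($project, $location, 'my-cluster');

// projects/my-project/locations/us-central1/clusters/my-cluster/topics/my-topic
$topicName = ManagedKafkaClient::topicName($project, $location, 'my-cluster', 'my-topic');

// projects/my-project/locations/us-central1/clusters/my-cluster/consumerGroups/my-group
$groupName = ManagedKafkaClient::consumerGroupName($project, $location, 'my-cluster', 'my-group');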
+ */ + public static function topicName(string $project, string $location, string $cluster, string $topic): string + { + return self::getPathTemplate('topic')->render([ + 'project' => $project, + 'location' => $location, + 'cluster' => $cluster, + 'topic' => $topic, + ]); + } + + /** + * Parses a formatted name string and returns an associative array of the components in the name. + * The following name formats are supported: + * Template: Pattern + * - cluster: projects/{project}/locations/{location}/clusters/{cluster} + * - consumerGroup: projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + * - cryptoKey: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key} + * - location: projects/{project}/locations/{location} + * - topic: projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * The optional $template argument can be supplied to specify a particular pattern, + * and must match one of the templates listed above. If no $template argument is + * provided, or if the $template argument does not match one of the templates + * listed, then parseName will check each of the supported templates, and return + * the first match. + * + * @param string $formattedName The formatted name string + * @param string $template Optional name of template to match + * + * @return array An associative array from name component IDs to component values. + * + * @throws ValidationException If $formattedName could not be matched. + */ + public static function parseName(string $formattedName, string $template = null): array + { + return self::parseFormattedName($formattedName, $template); + } + + /** + * Constructor. + * + * @param array $options { + * Optional. Options for configuring the service API wrapper. + * + * @type string $apiEndpoint + * The address of the API remote host. May optionally include the port, formatted + * as ":". Default 'managedkafka.googleapis.com:443'. + * @type string|array|FetchAuthTokenInterface|CredentialsWrapper $credentials + * The credentials to be used by the client to authorize API calls. This option + * accepts either a path to a credentials file, or a decoded credentials file as a + * PHP array. + * *Advanced usage*: In addition, this option can also accept a pre-constructed + * {@see \Google\Auth\FetchAuthTokenInterface} object or + * {@see \Google\ApiCore\CredentialsWrapper} object. Note that when one of these + * objects are provided, any settings in $credentialsConfig will be ignored. + * @type array $credentialsConfig + * Options used to configure credentials, including auth token caching, for the + * client. For a full list of supporting configuration options, see + * {@see \Google\ApiCore\CredentialsWrapper::build()} . + * @type bool $disableRetries + * Determines whether or not retries defined by the client configuration should be + * disabled. Defaults to `false`. + * @type string|array $clientConfig + * Client method configuration, including retry settings. This option can be either + * a path to a JSON file, or a PHP array containing the decoded JSON data. By + * default this settings points to the default client config file, which is + * provided in the resources folder. + * @type string|TransportInterface $transport + * The transport used for executing network requests. May be either the string + * `rest` or `grpc`. Defaults to `grpc` if gRPC support is detected on the system. 
+ * *Advanced usage*: Additionally, it is possible to pass in an already + * instantiated {@see \Google\ApiCore\Transport\TransportInterface} object. Note + * that when this object is provided, any settings in $transportConfig, and any + * $apiEndpoint setting, will be ignored. + * @type array $transportConfig + * Configuration options that will be used to construct the transport. Options for + * each supported transport type should be passed in a key for that transport. For + * example: + * $transportConfig = [ + * 'grpc' => [...], + * 'rest' => [...], + * ]; + * See the {@see \Google\ApiCore\Transport\GrpcTransport::build()} and + * {@see \Google\ApiCore\Transport\RestTransport::build()} methods for the + * supported options. + * @type callable $clientCertSource + * A callable which returns the client cert as a string. This can be used to + * provide a certificate and private key to the transport layer for mTLS. + * } + * + * @throws ValidationException + */ + public function __construct(array $options = []) + { + $clientOptions = $this->buildClientOptions($options); + $this->setClientOptions($clientOptions); + $this->operationsClient = $this->createOperationsClient($clientOptions); + } + + /** Handles execution of the async variants for each documented method. */ + public function __call($method, $args) + { + if (substr($method, -5) !== 'Async') { + trigger_error('Call to undefined method ' . __CLASS__ . "::$method()", E_USER_ERROR); + } + + array_unshift($args, substr($method, 0, -5)); + return call_user_func_array([$this, 'startAsyncCall'], $args); + } + + /** + * Creates a new cluster in a given project and location. + * + * The async variant is {@see ManagedKafkaClient::createClusterAsync()} . + * + * @example samples/V1/ManagedKafkaClient/create_cluster.php + * + * @param CreateClusterRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return OperationResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function createCluster(CreateClusterRequest $request, array $callOptions = []): OperationResponse + { + return $this->startApiCall('CreateCluster', $request, $callOptions)->wait(); + } + + /** + * Creates a new topic in a given project and location. + * + * The async variant is {@see ManagedKafkaClient::createTopicAsync()} . + * + * @example samples/V1/ManagedKafkaClient/create_topic.php + * + * @param CreateTopicRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return Topic + * + * @throws ApiException Thrown if the API call fails. + */ + public function createTopic(CreateTopicRequest $request, array $callOptions = []): Topic + { + return $this->startApiCall('CreateTopic', $request, $callOptions)->wait(); + } + + /** + * Deletes a single cluster. + * + * The async variant is {@see ManagedKafkaClient::deleteClusterAsync()} . 
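Putting the constructor options and the createCluster long-running-operation surface together, a rough sketch; the GcpConfig/NetworkConfig setters and the subnet path are assumptions inferred from the generated create_cluster sample's field path, and all IDs are placeholders:

use Google\ApiCore\ApiException;
use Google\Cloud\ManagedKafka\V1\AccessConfig;
use Google\Cloud\ManagedKafka\V1\CapacityConfig;
use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\Cluster;
use Google\Cloud\ManagedKafka\V1\CreateClusterRequest;
use Google\Cloud\ManagedKafka\V1\GcpConfig;
use Google\Cloud\ManagedKafka\V1\NetworkConfig;

// The default endpoint is managedkafka.googleapis.com:443; pass apiEndpoint
// in the options array only if a regional endpoint is required.
$client = new ManagedKafkaClient();

$parent = ManagedKafkaClient::locationName('my-project', 'us-central1');

// setAccessConfig()/setSubnet() are assumed from the sample's field path
// gcp_config.access_config.network_configs[].subnet.
$cluster = (new Cluster())
    ->setCapacityConfig((new CapacityConfig())->setVcpuCount(3)->setMemoryBytes(3 * 3221225472))
    ->setGcpConfig((new GcpConfig())->setAccessConfig((new AccessConfig())->setNetworkConfigs([
        (new NetworkConfig())->setSubnet('projects/my-project/regions/us-central1/subnetworks/default'),
    ])));

try {
    $operation = $client->createCluster(CreateClusterRequest::build($parent, $cluster, 'my-cluster'));
    $operation->pollUntilComplete();
    if ($operation->operationSucceeded()) {
        /** @var Cluster $created */
        $created = $operation->getResult();
        printf('Created %s' . PHP_EOL, $created->getName());
    }
} catch (ApiException $ex) {
    printf('Call failed: %s' . PHP_EOL, $ex->getMessage());
}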
+ * + * @example samples/V1/ManagedKafkaClient/delete_cluster.php + * + * @param DeleteClusterRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return OperationResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function deleteCluster(DeleteClusterRequest $request, array $callOptions = []): OperationResponse + { + return $this->startApiCall('DeleteCluster', $request, $callOptions)->wait(); + } + + /** + * Deletes a single consumer group. + * + * The async variant is {@see ManagedKafkaClient::deleteConsumerGroupAsync()} . + * + * @example samples/V1/ManagedKafkaClient/delete_consumer_group.php + * + * @param DeleteConsumerGroupRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @throws ApiException Thrown if the API call fails. + */ + public function deleteConsumerGroup(DeleteConsumerGroupRequest $request, array $callOptions = []): void + { + $this->startApiCall('DeleteConsumerGroup', $request, $callOptions)->wait(); + } + + /** + * Deletes a single topic. + * + * The async variant is {@see ManagedKafkaClient::deleteTopicAsync()} . + * + * @example samples/V1/ManagedKafkaClient/delete_topic.php + * + * @param DeleteTopicRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @throws ApiException Thrown if the API call fails. + */ + public function deleteTopic(DeleteTopicRequest $request, array $callOptions = []): void + { + $this->startApiCall('DeleteTopic', $request, $callOptions)->wait(); + } + + /** + * Returns the properties of a single cluster. + * + * The async variant is {@see ManagedKafkaClient::getClusterAsync()} . + * + * @example samples/V1/ManagedKafkaClient/get_cluster.php + * + * @param GetClusterRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return Cluster + * + * @throws ApiException Thrown if the API call fails. + */ + public function getCluster(GetClusterRequest $request, array $callOptions = []): Cluster + { + return $this->startApiCall('GetCluster', $request, $callOptions)->wait(); + } + + /** + * Returns the properties of a single consumer group. + * + * The async variant is {@see ManagedKafkaClient::getConsumerGroupAsync()} . 
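deleteTopic() and deleteConsumerGroup() return nothing on success, so only the failure path needs handling; a brief sketch with placeholder names, assuming DeleteTopicRequest::setName() from the generated request class:

use Google\ApiCore\ApiException;
use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\DeleteTopicRequest;

$client = new ManagedKafkaClient();

// Placeholder resource name.
$name = ManagedKafkaClient::topicName('my-project', 'us-central1', 'my-cluster', 'my-topic');

try {
    $client->deleteTopic((new DeleteTopicRequest())->setName($name));
    print('Topic deleted.' . PHP_EOL);
} catch (ApiException $ex) {
    printf('Call failed: %s' . PHP_EOL, $ex->getMessage());
}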
+ * + * @example samples/V1/ManagedKafkaClient/get_consumer_group.php + * + * @param GetConsumerGroupRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return ConsumerGroup + * + * @throws ApiException Thrown if the API call fails. + */ + public function getConsumerGroup(GetConsumerGroupRequest $request, array $callOptions = []): ConsumerGroup + { + return $this->startApiCall('GetConsumerGroup', $request, $callOptions)->wait(); + } + + /** + * Returns the properties of a single topic. + * + * The async variant is {@see ManagedKafkaClient::getTopicAsync()} . + * + * @example samples/V1/ManagedKafkaClient/get_topic.php + * + * @param GetTopicRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return Topic + * + * @throws ApiException Thrown if the API call fails. + */ + public function getTopic(GetTopicRequest $request, array $callOptions = []): Topic + { + return $this->startApiCall('GetTopic', $request, $callOptions)->wait(); + } + + /** + * Lists the clusters in a given project and location. + * + * The async variant is {@see ManagedKafkaClient::listClustersAsync()} . + * + * @example samples/V1/ManagedKafkaClient/list_clusters.php + * + * @param ListClustersRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return PagedListResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function listClusters(ListClustersRequest $request, array $callOptions = []): PagedListResponse + { + return $this->startApiCall('ListClusters', $request, $callOptions); + } + + /** + * Lists the consumer groups in a given cluster. + * + * The async variant is {@see ManagedKafkaClient::listConsumerGroupsAsync()} . + * + * @example samples/V1/ManagedKafkaClient/list_consumer_groups.php + * + * @param ListConsumerGroupsRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return PagedListResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function listConsumerGroups(ListConsumerGroupsRequest $request, array $callOptions = []): PagedListResponse + { + return $this->startApiCall('ListConsumerGroups', $request, $callOptions); + } + + /** + * Lists the topics in a given cluster. + * + * The async variant is {@see ManagedKafkaClient::listTopicsAsync()} . 
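listClusters(), listConsumerGroups() and listTopics() return a PagedListResponse rather than a raw list, so results can be iterated across pages transparently. A sketch with a placeholder parent, assuming ListClustersRequest::setParent() from the generated request class:

use Google\ApiCore\ApiException;
use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\ListClustersRequest;

$client = new ManagedKafkaClient();
$parent = ManagedKafkaClient::locationName('my-project', 'us-central1');

try {
    // The PagedListResponse iterator fetches additional pages on demand.
    $response = $client->listClusters((new ListClustersRequest())->setParent($parent));
    foreach ($response as $cluster) {
        printf('Cluster: %s' . PHP_EOL, $cluster->getName());
    }
} catch (ApiException $ex) {
    printf('Call failed: %s' . PHP_EOL, $ex->getMessage());
}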
+ * + * @example samples/V1/ManagedKafkaClient/list_topics.php + * + * @param ListTopicsRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return PagedListResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function listTopics(ListTopicsRequest $request, array $callOptions = []): PagedListResponse + { + return $this->startApiCall('ListTopics', $request, $callOptions); + } + + /** + * Updates the properties of a single cluster. + * + * The async variant is {@see ManagedKafkaClient::updateClusterAsync()} . + * + * @example samples/V1/ManagedKafkaClient/update_cluster.php + * + * @param UpdateClusterRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return OperationResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function updateCluster(UpdateClusterRequest $request, array $callOptions = []): OperationResponse + { + return $this->startApiCall('UpdateCluster', $request, $callOptions)->wait(); + } + + /** + * Updates the properties of a single consumer group. + * + * The async variant is {@see ManagedKafkaClient::updateConsumerGroupAsync()} . + * + * @example samples/V1/ManagedKafkaClient/update_consumer_group.php + * + * @param UpdateConsumerGroupRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return ConsumerGroup + * + * @throws ApiException Thrown if the API call fails. + */ + public function updateConsumerGroup(UpdateConsumerGroupRequest $request, array $callOptions = []): ConsumerGroup + { + return $this->startApiCall('UpdateConsumerGroup', $request, $callOptions)->wait(); + } + + /** + * Updates the properties of a single topic. + * + * The async variant is {@see ManagedKafkaClient::updateTopicAsync()} . + * + * @example samples/V1/ManagedKafkaClient/update_topic.php + * + * @param UpdateTopicRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return Topic + * + * @throws ApiException Thrown if the API call fails. + */ + public function updateTopic(UpdateTopicRequest $request, array $callOptions = []): Topic + { + return $this->startApiCall('UpdateTopic', $request, $callOptions)->wait(); + } + + /** + * Gets information about a location. + * + * The async variant is {@see ManagedKafkaClient::getLocationAsync()} . 
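The update calls follow the usual field-mask pattern: only the paths listed in the mask are applied. A sketch for updateTopic() with placeholder names; Topic::setName() and the 'partition_count' mask path are assumptions based on the resource's field names:

use Google\ApiCore\ApiException;
use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\Topic;
use Google\Cloud\ManagedKafka\V1\UpdateTopicRequest;
use Google\Protobuf\FieldMask;

$client = new ManagedKafkaClient();

// Identify the topic to update and the single field being changed.
$topic = (new Topic())
    ->setName(ManagedKafkaClient::topicName('my-project', 'us-central1', 'my-cluster', 'my-topic'))
    ->setPartitionCount(6);
$updateMask = (new FieldMask())->setPaths(['partition_count']); // illustrative mask path

try {
    $updated = $client->updateTopic((new UpdateTopicRequest())
        ->setUpdateMask($updateMask)
        ->setTopic($topic));
    printf('Partitions: %d' . PHP_EOL, $updated->getPartitionCount());
} catch (ApiException $ex) {
    printf('Call failed: %s' . PHP_EOL, $ex->getMessage());
}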
+ * + * @example samples/V1/ManagedKafkaClient/get_location.php + * + * @param GetLocationRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return Location + * + * @throws ApiException Thrown if the API call fails. + */ + public function getLocation(GetLocationRequest $request, array $callOptions = []): Location + { + return $this->startApiCall('GetLocation', $request, $callOptions)->wait(); + } + + /** + * Lists information about the supported locations for this service. + * + * The async variant is {@see ManagedKafkaClient::listLocationsAsync()} . + * + * @example samples/V1/ManagedKafkaClient/list_locations.php + * + * @param ListLocationsRequest $request A request to house fields associated with the call. + * @param array $callOptions { + * Optional. + * + * @type RetrySettings|array $retrySettings + * Retry settings to use for this call. Can be a {@see RetrySettings} object, or an + * associative array of retry settings parameters. See the documentation on + * {@see RetrySettings} for example usage. + * } + * + * @return PagedListResponse + * + * @throws ApiException Thrown if the API call fails. + */ + public function listLocations(ListLocationsRequest $request, array $callOptions = []): PagedListResponse + { + return $this->startApiCall('ListLocations', $request, $callOptions); + } +} diff --git a/ManagedKafka/src/V1/Cluster.php b/ManagedKafka/src/V1/Cluster.php new file mode 100644 index 000000000000..d3d0d0766e51 --- /dev/null +++ b/ManagedKafka/src/V1/Cluster.php @@ -0,0 +1,360 @@ +google.cloud.managedkafka.v1.Cluster + */ +class Cluster extends \Google\Protobuf\Internal\Message +{ + /** + * Identifier. The name of the cluster. Structured like: + * projects/{project_number}/locations/{location}/clusters/{cluster_id} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + */ + protected $name = ''; + /** + * Output only. The time when the cluster was created. + * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $create_time = null; + /** + * Output only. The time when the cluster was last updated. + * + * Generated from protobuf field .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $update_time = null; + /** + * Optional. Labels as key value pairs. + * + * Generated from protobuf field map labels = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + private $labels; + /** + * Required. Capacity configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 5 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $capacity_config = null; + /** + * Optional. Rebalance configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig rebalance_config = 8 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $rebalance_config = null; + /** + * Output only. The current state of the cluster. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster.State state = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $state = 0; + protected $platform_config; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Cloud\ManagedKafka\V1\GcpConfig $gcp_config + * Required. Configuration properties for a Kafka cluster deployed to Google + * Cloud Platform. + * @type string $name + * Identifier. The name of the cluster. Structured like: + * projects/{project_number}/locations/{location}/clusters/{cluster_id} + * @type \Google\Protobuf\Timestamp $create_time + * Output only. The time when the cluster was created. + * @type \Google\Protobuf\Timestamp $update_time + * Output only. The time when the cluster was last updated. + * @type array|\Google\Protobuf\Internal\MapField $labels + * Optional. Labels as key value pairs. + * @type \Google\Cloud\ManagedKafka\V1\CapacityConfig $capacity_config + * Required. Capacity configuration for the Kafka cluster. + * @type \Google\Cloud\ManagedKafka\V1\RebalanceConfig $rebalance_config + * Optional. Rebalance configuration for the Kafka cluster. + * @type int $state + * Output only. The current state of the cluster. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. Configuration properties for a Kafka cluster deployed to Google + * Cloud Platform. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.GcpConfig gcp_config = 9 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\GcpConfig|null + */ + public function getGcpConfig() + { + return $this->readOneof(9); + } + + public function hasGcpConfig() + { + return $this->hasOneof(9); + } + + /** + * Required. Configuration properties for a Kafka cluster deployed to Google + * Cloud Platform. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.GcpConfig gcp_config = 9 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\GcpConfig $var + * @return $this + */ + public function setGcpConfig($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\GcpConfig::class); + $this->writeOneof(9, $var); + + return $this; + } + + /** + * Identifier. The name of the cluster. Structured like: + * projects/{project_number}/locations/{location}/clusters/{cluster_id} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Identifier. The name of the cluster. Structured like: + * projects/{project_number}/locations/{location}/clusters/{cluster_id} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + + /** + * Output only. The time when the cluster was created. 
+ * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return \Google\Protobuf\Timestamp|null + */ + public function getCreateTime() + { + return $this->create_time; + } + + public function hasCreateTime() + { + return isset($this->create_time); + } + + public function clearCreateTime() + { + unset($this->create_time); + } + + /** + * Output only. The time when the cluster was created. + * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param \Google\Protobuf\Timestamp $var + * @return $this + */ + public function setCreateTime($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class); + $this->create_time = $var; + + return $this; + } + + /** + * Output only. The time when the cluster was last updated. + * + * Generated from protobuf field .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return \Google\Protobuf\Timestamp|null + */ + public function getUpdateTime() + { + return $this->update_time; + } + + public function hasUpdateTime() + { + return isset($this->update_time); + } + + public function clearUpdateTime() + { + unset($this->update_time); + } + + /** + * Output only. The time when the cluster was last updated. + * + * Generated from protobuf field .google.protobuf.Timestamp update_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param \Google\Protobuf\Timestamp $var + * @return $this + */ + public function setUpdateTime($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class); + $this->update_time = $var; + + return $this; + } + + /** + * Optional. Labels as key value pairs. + * + * Generated from protobuf field map labels = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @return \Google\Protobuf\Internal\MapField + */ + public function getLabels() + { + return $this->labels; + } + + /** + * Optional. Labels as key value pairs. + * + * Generated from protobuf field map labels = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @param array|\Google\Protobuf\Internal\MapField $var + * @return $this + */ + public function setLabels($var) + { + $arr = GPBUtil::checkMapField($var, \Google\Protobuf\Internal\GPBType::STRING, \Google\Protobuf\Internal\GPBType::STRING); + $this->labels = $arr; + + return $this; + } + + /** + * Required. Capacity configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 5 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\CapacityConfig|null + */ + public function getCapacityConfig() + { + return $this->capacity_config; + } + + public function hasCapacityConfig() + { + return isset($this->capacity_config); + } + + public function clearCapacityConfig() + { + unset($this->capacity_config); + } + + /** + * Required. Capacity configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.CapacityConfig capacity_config = 5 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\CapacityConfig $var + * @return $this + */ + public function setCapacityConfig($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\CapacityConfig::class); + $this->capacity_config = $var; + + return $this; + } + + /** + * Optional. Rebalance configuration for the Kafka cluster. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig rebalance_config = 8 [(.google.api.field_behavior) = OPTIONAL]; + * @return \Google\Cloud\ManagedKafka\V1\RebalanceConfig|null + */ + public function getRebalanceConfig() + { + return $this->rebalance_config; + } + + public function hasRebalanceConfig() + { + return isset($this->rebalance_config); + } + + public function clearRebalanceConfig() + { + unset($this->rebalance_config); + } + + /** + * Optional. Rebalance configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig rebalance_config = 8 [(.google.api.field_behavior) = OPTIONAL]; + * @param \Google\Cloud\ManagedKafka\V1\RebalanceConfig $var + * @return $this + */ + public function setRebalanceConfig($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\RebalanceConfig::class); + $this->rebalance_config = $var; + + return $this; + } + + /** + * Output only. The current state of the cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster.State state = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return int + */ + public function getState() + { + return $this->state; + } + + /** + * Output only. The current state of the cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster.State state = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param int $var + * @return $this + */ + public function setState($var) + { + GPBUtil::checkEnum($var, \Google\Cloud\ManagedKafka\V1\Cluster\State::class); + $this->state = $var; + + return $this; + } + + /** + * @return string + */ + public function getPlatformConfig() + { + return $this->whichOneof("platform_config"); + } + +} + diff --git a/ManagedKafka/src/V1/Cluster/State.php b/ManagedKafka/src/V1/Cluster/State.php new file mode 100644 index 000000000000..7a114858b26e --- /dev/null +++ b/ManagedKafka/src/V1/Cluster/State.php @@ -0,0 +1,69 @@ +google.cloud.managedkafka.v1.Cluster.State + */ +class State +{ + /** + * A state was not specified. + * + * Generated from protobuf enum STATE_UNSPECIFIED = 0; + */ + const STATE_UNSPECIFIED = 0; + /** + * The cluster is being created. + * + * Generated from protobuf enum CREATING = 1; + */ + const CREATING = 1; + /** + * The cluster is active. + * + * Generated from protobuf enum ACTIVE = 2; + */ + const ACTIVE = 2; + /** + * The cluster is being deleted. + * + * Generated from protobuf enum DELETING = 3; + */ + const DELETING = 3; + + private static $valueToName = [ + self::STATE_UNSPECIFIED => 'STATE_UNSPECIFIED', + self::CREATING => 'CREATING', + self::ACTIVE => 'ACTIVE', + self::DELETING => 'DELETING', + ]; + + public static function name($value) + { + if (!isset(self::$valueToName[$value])) { + throw new UnexpectedValueException(sprintf( + 'Enum %s has no name defined for value %s', __CLASS__, $value)); + } + return self::$valueToName[$value]; + } + + + public static function value($name) + { + $const = __CLASS__ . '::' . 
strtoupper($name); + if (!defined($const)) { + throw new UnexpectedValueException(sprintf( + 'Enum %s has no value defined for name %s', __CLASS__, $name)); + } + return constant($const); + } +} + + diff --git a/ManagedKafka/src/V1/ConsumerGroup.php b/ManagedKafka/src/V1/ConsumerGroup.php new file mode 100644 index 000000000000..1dd45cb33745 --- /dev/null +++ b/ManagedKafka/src/V1/ConsumerGroup.php @@ -0,0 +1,117 @@ +google.cloud.managedkafka.v1.ConsumerGroup + */ +class ConsumerGroup extends \Google\Protobuf\Internal\Message +{ + /** + * Identifier. The name of the consumer group. The `consumer_group` segment is + * used when connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + */ + protected $name = ''; + /** + * Optional. Metadata for this consumer group for all topics it has metadata + * for. The key of the map is a topic name, structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field map topics = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + private $topics; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Identifier. The name of the consumer group. The `consumer_group` segment is + * used when connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + * @type array|\Google\Protobuf\Internal\MapField $topics + * Optional. Metadata for this consumer group for all topics it has metadata + * for. The key of the map is a topic name, structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Identifier. The name of the consumer group. The `consumer_group` segment is + * used when connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Identifier. The name of the consumer group. The `consumer_group` segment is + * used when connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + + /** + * Optional. Metadata for this consumer group for all topics it has metadata + * for. The key of the map is a topic name, structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field map topics = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @return \Google\Protobuf\Internal\MapField + */ + public function getTopics() + { + return $this->topics; + } + + /** + * Optional. Metadata for this consumer group for all topics it has metadata + * for. 
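getState() returns the raw enum integer; Cluster\State::name() above maps it back to its symbolic name. A sketch with a placeholder cluster, assuming GetClusterRequest::setName() from the generated request class:

use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\Cluster\State;
use Google\Cloud\ManagedKafka\V1\GetClusterRequest;

$client = new ManagedKafkaClient();
$name = ManagedKafkaClient::clusterName('my-project', 'us-central1', 'my-cluster');

$cluster = $client->getCluster((new GetClusterRequest())->setName($name));

// Prints, for example, "State: ACTIVE" once provisioning has finished.
printf('State: %s' . PHP_EOL, State::name($cluster->getState()));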
The key of the map is a topic name, structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field map topics = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @param array|\Google\Protobuf\Internal\MapField $var + * @return $this + */ + public function setTopics($var) + { + $arr = GPBUtil::checkMapField($var, \Google\Protobuf\Internal\GPBType::STRING, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\ConsumerTopicMetadata::class); + $this->topics = $arr; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ConsumerPartitionMetadata.php b/ManagedKafka/src/V1/ConsumerPartitionMetadata.php new file mode 100644 index 000000000000..b90ae90f1ac5 --- /dev/null +++ b/ManagedKafka/src/V1/ConsumerPartitionMetadata.php @@ -0,0 +1,109 @@ +google.cloud.managedkafka.v1.ConsumerPartitionMetadata + */ +class ConsumerPartitionMetadata extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The offset for this partition, or 0 if no offset has been + * committed. + * + * Generated from protobuf field int64 offset = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $offset = 0; + /** + * Optional. The associated metadata for this partition, or empty if it does + * not exist. + * + * Generated from protobuf field string metadata = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $metadata = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type int|string $offset + * Required. The offset for this partition, or 0 if no offset has been + * committed. + * @type string $metadata + * Optional. The associated metadata for this partition, or empty if it does + * not exist. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. The offset for this partition, or 0 if no offset has been + * committed. + * + * Generated from protobuf field int64 offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return int|string + */ + public function getOffset() + { + return $this->offset; + } + + /** + * Required. The offset for this partition, or 0 if no offset has been + * committed. + * + * Generated from protobuf field int64 offset = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param int|string $var + * @return $this + */ + public function setOffset($var) + { + GPBUtil::checkInt64($var); + $this->offset = $var; + + return $this; + } + + /** + * Optional. The associated metadata for this partition, or empty if it does + * not exist. + * + * Generated from protobuf field string metadata = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getMetadata() + { + return $this->metadata; + } + + /** + * Optional. The associated metadata for this partition, or empty if it does + * not exist. 
+ * + * Generated from protobuf field string metadata = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setMetadata($var) + { + GPBUtil::checkString($var, True); + $this->metadata = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ConsumerTopicMetadata.php b/ManagedKafka/src/V1/ConsumerTopicMetadata.php new file mode 100644 index 000000000000..1bae3f307e4f --- /dev/null +++ b/ManagedKafka/src/V1/ConsumerTopicMetadata.php @@ -0,0 +1,71 @@ +google.cloud.managedkafka.v1.ConsumerTopicMetadata + */ +class ConsumerTopicMetadata extends \Google\Protobuf\Internal\Message +{ + /** + * Optional. Metadata for this consumer group and topic for all partition + * indexes it has metadata for. + * + * Generated from protobuf field map partitions = 1 [(.google.api.field_behavior) = OPTIONAL]; + */ + private $partitions; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type array|\Google\Protobuf\Internal\MapField $partitions + * Optional. Metadata for this consumer group and topic for all partition + * indexes it has metadata for. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Optional. Metadata for this consumer group and topic for all partition + * indexes it has metadata for. + * + * Generated from protobuf field map partitions = 1 [(.google.api.field_behavior) = OPTIONAL]; + * @return \Google\Protobuf\Internal\MapField + */ + public function getPartitions() + { + return $this->partitions; + } + + /** + * Optional. Metadata for this consumer group and topic for all partition + * indexes it has metadata for. + * + * Generated from protobuf field map partitions = 1 [(.google.api.field_behavior) = OPTIONAL]; + * @param array|\Google\Protobuf\Internal\MapField $var + * @return $this + */ + public function setPartitions($var) + { + $arr = GPBUtil::checkMapField($var, \Google\Protobuf\Internal\GPBType::INT32, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\ConsumerPartitionMetadata::class); + $this->partitions = $arr; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/CreateClusterRequest.php b/ManagedKafka/src/V1/CreateClusterRequest.php new file mode 100644 index 000000000000..8271e3324f0e --- /dev/null +++ b/ManagedKafka/src/V1/CreateClusterRequest.php @@ -0,0 +1,272 @@ +google.cloud.managedkafka.v1.CreateClusterRequest + */ +class CreateClusterRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The parent region in which to create the cluster. Structured like + * `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $parent = ''; + /** + * Required. The ID to use for the cluster, which will become the final + * component of the cluster's name. The ID must be 1-63 characters long, and + * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + * RFC 1035. + * This value is structured like: `my-cluster-id`. + * + * Generated from protobuf field string cluster_id = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $cluster_id = ''; + /** + * Required. Configuration of the cluster to create. Its `name` field is + * ignored. 
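ConsumerGroup exposes its committed offsets as nested maps (topic resource name to ConsumerTopicMetadata, then partition index to ConsumerPartitionMetadata). A sketch that walks them, assuming GetConsumerGroupRequest::setName() from the generated request class and placeholder IDs:

use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\GetConsumerGroupRequest;

$client = new ManagedKafkaClient();
$name = ManagedKafkaClient::consumerGroupName('my-project', 'us-central1', 'my-cluster', 'my-group');

$group = $client->getConsumerGroup((new GetConsumerGroupRequest())->setName($name));

// topics: map of topic resource name => ConsumerTopicMetadata
foreach ($group->getTopics() as $topicName => $topicMetadata) {
    // partitions: map of partition index => ConsumerPartitionMetadata
    foreach ($topicMetadata->getPartitions() as $partition => $partitionMetadata) {
        printf('%s[%d] offset=%d' . PHP_EOL, $topicName, $partition, $partitionMetadata->getOffset());
    }
}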
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $cluster = null; + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + */ + protected $request_id = ''; + + /** + * @param string $parent Required. The parent region in which to create the cluster. Structured like + * `projects/{project}/locations/{location}`. Please see + * {@see ManagedKafkaClient::locationName()} for help formatting this field. + * @param \Google\Cloud\ManagedKafka\V1\Cluster $cluster Required. Configuration of the cluster to create. Its `name` field is + * ignored. + * @param string $clusterId Required. The ID to use for the cluster, which will become the final + * component of the cluster's name. The ID must be 1-63 characters long, and + * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + * RFC 1035. + * + * This value is structured like: `my-cluster-id`. + * + * @return \Google\Cloud\ManagedKafka\V1\CreateClusterRequest + * + * @experimental + */ + public static function build(string $parent, \Google\Cloud\ManagedKafka\V1\Cluster $cluster, string $clusterId): self + { + return (new self()) + ->setParent($parent) + ->setCluster($cluster) + ->setClusterId($clusterId); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $parent + * Required. The parent region in which to create the cluster. Structured like + * `projects/{project}/locations/{location}`. + * @type string $cluster_id + * Required. The ID to use for the cluster, which will become the final + * component of the cluster's name. The ID must be 1-63 characters long, and + * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + * RFC 1035. + * This value is structured like: `my-cluster-id`. + * @type \Google\Cloud\ManagedKafka\V1\Cluster $cluster + * Required. Configuration of the cluster to create. Its `name` field is + * ignored. + * @type string $request_id + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. 
If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The parent region in which to create the cluster. Structured like + * `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getParent() + { + return $this->parent; + } + + /** + * Required. The parent region in which to create the cluster. Structured like + * `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setParent($var) + { + GPBUtil::checkString($var, True); + $this->parent = $var; + + return $this; + } + + /** + * Required. The ID to use for the cluster, which will become the final + * component of the cluster's name. The ID must be 1-63 characters long, and + * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + * RFC 1035. + * This value is structured like: `my-cluster-id`. + * + * Generated from protobuf field string cluster_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return string + */ + public function getClusterId() + { + return $this->cluster_id; + } + + /** + * Required. The ID to use for the cluster, which will become the final + * component of the cluster's name. The ID must be 1-63 characters long, and + * match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with + * RFC 1035. + * This value is structured like: `my-cluster-id`. + * + * Generated from protobuf field string cluster_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param string $var + * @return $this + */ + public function setClusterId($var) + { + GPBUtil::checkString($var, True); + $this->cluster_id = $var; + + return $this; + } + + /** + * Required. Configuration of the cluster to create. Its `name` field is + * ignored. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\Cluster|null + */ + public function getCluster() + { + return $this->cluster; + } + + public function hasCluster() + { + return isset($this->cluster); + } + + public function clearCluster() + { + unset($this->cluster); + } + + /** + * Required. Configuration of the cluster to create. Its `name` field is + * ignored. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 3 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\Cluster $var + * @return $this + */ + public function setCluster($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\Cluster::class); + $this->cluster = $var; + + return $this; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. 
+ * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @return string + */ + public function getRequestId() + { + return $this->request_id; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 4 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @param string $var + * @return $this + */ + public function setRequestId($var) + { + GPBUtil::checkString($var, True); + $this->request_id = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/CreateTopicRequest.php b/ManagedKafka/src/V1/CreateTopicRequest.php new file mode 100644 index 000000000000..49797ac7e3d0 --- /dev/null +++ b/ManagedKafka/src/V1/CreateTopicRequest.php @@ -0,0 +1,189 @@ +google.cloud.managedkafka.v1.CreateTopicRequest + */ +class CreateTopicRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The parent cluster in which to create the topic. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $parent = ''; + /** + * Required. The ID to use for the topic, which will become the final + * component of the topic's name. + * This value is structured like: `my-topic-name`. + * + * Generated from protobuf field string topic_id = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $topic_id = ''; + /** + * Required. Configuration of the topic to create. Its `name` field is + * ignored. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 3 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $topic = null; + + /** + * @param string $parent Required. The parent cluster in which to create the topic. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. Please see + * {@see ManagedKafkaClient::clusterName()} for help formatting this field. + * @param \Google\Cloud\ManagedKafka\V1\Topic $topic Required. Configuration of the topic to create. Its `name` field is + * ignored. + * @param string $topicId Required. The ID to use for the topic, which will become the final + * component of the topic's name. 
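The request_id field described above is what makes create and delete retries safe: reusing the same UUID within the 60-minute window lets the server drop duplicate attempts. A short sketch; the UUID literal is a placeholder, and real code should generate one per logical request:

use Google\Cloud\ManagedKafka\V1\CreateClusterRequest;

// Reuse the same request_id when retrying this exact logical request so the
// server can deduplicate it; the all-zero UUID is not accepted.
$request = (new CreateClusterRequest())
    ->setParent('projects/my-project/locations/us-central1')
    ->setClusterId('my-cluster')
    ->setRequestId('3d1b2c5e-7f6a-4c2b-9a1d-8e4f5a6b7c8d');
// ->setCluster(...) would still be required before sending.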
+ * + * This value is structured like: `my-topic-name`. + * + * @return \Google\Cloud\ManagedKafka\V1\CreateTopicRequest + * + * @experimental + */ + public static function build(string $parent, \Google\Cloud\ManagedKafka\V1\Topic $topic, string $topicId): self + { + return (new self()) + ->setParent($parent) + ->setTopic($topic) + ->setTopicId($topicId); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $parent + * Required. The parent cluster in which to create the topic. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * @type string $topic_id + * Required. The ID to use for the topic, which will become the final + * component of the topic's name. + * This value is structured like: `my-topic-name`. + * @type \Google\Cloud\ManagedKafka\V1\Topic $topic + * Required. Configuration of the topic to create. Its `name` field is + * ignored. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The parent cluster in which to create the topic. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getParent() + { + return $this->parent; + } + + /** + * Required. The parent cluster in which to create the topic. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setParent($var) + { + GPBUtil::checkString($var, True); + $this->parent = $var; + + return $this; + } + + /** + * Required. The ID to use for the topic, which will become the final + * component of the topic's name. + * This value is structured like: `my-topic-name`. + * + * Generated from protobuf field string topic_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return string + */ + public function getTopicId() + { + return $this->topic_id; + } + + /** + * Required. The ID to use for the topic, which will become the final + * component of the topic's name. + * This value is structured like: `my-topic-name`. + * + * Generated from protobuf field string topic_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param string $var + * @return $this + */ + public function setTopicId($var) + { + GPBUtil::checkString($var, True); + $this->topic_id = $var; + + return $this; + } + + /** + * Required. Configuration of the topic to create. Its `name` field is + * ignored. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 3 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\Topic|null + */ + public function getTopic() + { + return $this->topic; + } + + public function hasTopic() + { + return isset($this->topic); + } + + public function clearTopic() + { + unset($this->topic); + } + + /** + * Required. Configuration of the topic to create. Its `name` field is + * ignored. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 3 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\Topic $var + * @return $this + */ + public function setTopic($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\Topic::class); + $this->topic = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/DeleteClusterRequest.php b/ManagedKafka/src/V1/DeleteClusterRequest.php new file mode 100644 index 000000000000..d01b80718e76 --- /dev/null +++ b/ManagedKafka/src/V1/DeleteClusterRequest.php @@ -0,0 +1,159 @@ +google.cloud.managedkafka.v1.DeleteClusterRequest + */ +class DeleteClusterRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the cluster to delete. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + */ + protected $request_id = ''; + + /** + * @param string $name Required. The name of the cluster to delete. Please see + * {@see ManagedKafkaClient::clusterName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\DeleteClusterRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the cluster to delete. + * @type string $request_id + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the cluster to delete. 
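+ * Structured like
+ * `projects/{project}/locations/{location}/clusters/{cluster}`; as an
+ * illustrative sketch (the project, location and cluster IDs below are
+ * placeholders), such a name can be built with the helper referenced above:
+ *
+ *     $name = ManagedKafkaClient::clusterName('my-project', 'us-central1', 'my-cluster');
+ *     $request = DeleteClusterRequest::build($name);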
+ * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the cluster to delete. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @return string + */ + public function getRequestId() + { + return $this->request_id; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @param string $var + * @return $this + */ + public function setRequestId($var) + { + GPBUtil::checkString($var, True); + $this->request_id = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/DeleteConsumerGroupRequest.php b/ManagedKafka/src/V1/DeleteConsumerGroupRequest.php new file mode 100644 index 000000000000..fec2b3a08134 --- /dev/null +++ b/ManagedKafka/src/V1/DeleteConsumerGroupRequest.php @@ -0,0 +1,86 @@ +google.cloud.managedkafka.v1.DeleteConsumerGroupRequest + */ +class DeleteConsumerGroupRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the consumer group to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + + /** + * @param string $name Required. The name of the consumer group to delete. 
+ * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. Please see + * {@see ManagedKafkaClient::consumerGroupName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\DeleteConsumerGroupRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the consumer group to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the consumer group to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the consumer group to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/DeleteTopicRequest.php b/ManagedKafka/src/V1/DeleteTopicRequest.php new file mode 100644 index 000000000000..c8f0b94a2ee5 --- /dev/null +++ b/ManagedKafka/src/V1/DeleteTopicRequest.php @@ -0,0 +1,86 @@ +google.cloud.managedkafka.v1.DeleteTopicRequest + */ +class DeleteTopicRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the topic to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + + /** + * @param string $name Required. The name of the topic to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`. Please see + * {@see ManagedKafkaClient::topicName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\DeleteTopicRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the topic to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the topic to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`. 
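+ * As an illustrative sketch (the IDs below are placeholders), such a name can
+ * be produced with the helper referenced above and passed to `build()`:
+ *
+ *     $name = ManagedKafkaClient::topicName('my-project', 'us-central1', 'my-cluster', 'my-topic');
+ *     $request = DeleteTopicRequest::build($name);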
+ * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the topic to delete. + * `projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/GcpConfig.php b/ManagedKafka/src/V1/GcpConfig.php new file mode 100644 index 000000000000..375598e9df03 --- /dev/null +++ b/ManagedKafka/src/V1/GcpConfig.php @@ -0,0 +1,132 @@ +google.cloud.managedkafka.v1.GcpConfig + */ +class GcpConfig extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Access configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.AccessConfig access_config = 3 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $access_config = null; + /** + * Optional. Immutable. The Cloud KMS Key name to use for encryption. The key + * must be located in the same region as the cluster and cannot be changed. + * Structured like: + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + * Note that the project component only accepts a project ID, and not a + * project number. + * + * Generated from protobuf field string kms_key = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { + */ + protected $kms_key = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Cloud\ManagedKafka\V1\AccessConfig $access_config + * Required. Access configuration for the Kafka cluster. + * @type string $kms_key + * Optional. Immutable. The Cloud KMS Key name to use for encryption. The key + * must be located in the same region as the cluster and cannot be changed. + * Structured like: + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + * Note that the project component only accepts a project ID, and not a + * project number. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. Access configuration for the Kafka cluster. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.AccessConfig access_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\AccessConfig|null + */ + public function getAccessConfig() + { + return $this->access_config; + } + + public function hasAccessConfig() + { + return isset($this->access_config); + } + + public function clearAccessConfig() + { + unset($this->access_config); + } + + /** + * Required. Access configuration for the Kafka cluster. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.AccessConfig access_config = 3 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\AccessConfig $var + * @return $this + */ + public function setAccessConfig($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\AccessConfig::class); + $this->access_config = $var; + + return $this; + } + + /** + * Optional. Immutable. The Cloud KMS Key name to use for encryption. The key + * must be located in the same region as the cluster and cannot be changed. + * Structured like: + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + * Note that the project component only accepts a project ID, and not a + * project number. + * + * Generated from protobuf field string kms_key = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { + * @return string + */ + public function getKmsKey() + { + return $this->kms_key; + } + + /** + * Optional. Immutable. The Cloud KMS Key name to use for encryption. The key + * must be located in the same region as the cluster and cannot be changed. + * Structured like: + * projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + * Note that the project component only accepts a project ID, and not a + * project number. + * + * Generated from protobuf field string kms_key = 2 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setKmsKey($var) + { + GPBUtil::checkString($var, True); + $this->kms_key = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/GetClusterRequest.php b/ManagedKafka/src/V1/GetClusterRequest.php new file mode 100644 index 000000000000..a0aae7374680 --- /dev/null +++ b/ManagedKafka/src/V1/GetClusterRequest.php @@ -0,0 +1,81 @@ +google.cloud.managedkafka.v1.GetClusterRequest + */ +class GetClusterRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the cluster whose configuration to return. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + + /** + * @param string $name Required. The name of the cluster whose configuration to return. Please see + * {@see ManagedKafkaClient::clusterName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\GetClusterRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the cluster whose configuration to return. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the cluster whose configuration to return. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the cluster whose configuration to return. 
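+ * As an illustrative sketch (placeholder IDs only), the same value is more
+ * conveniently supplied through the `build()` factory defined above:
+ *
+ *     $request = GetClusterRequest::build(
+ *         ManagedKafkaClient::clusterName('my-project', 'us-central1', 'my-cluster')
+ *     );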
+ * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/GetConsumerGroupRequest.php b/ManagedKafka/src/V1/GetConsumerGroupRequest.php new file mode 100644 index 000000000000..05104476d0a0 --- /dev/null +++ b/ManagedKafka/src/V1/GetConsumerGroupRequest.php @@ -0,0 +1,86 @@ +google.cloud.managedkafka.v1.GetConsumerGroupRequest + */ +class GetConsumerGroupRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the consumer group whose configuration to return. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + + /** + * @param string $name Required. The name of the consumer group whose configuration to return. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. Please see + * {@see ManagedKafkaClient::consumerGroupName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\GetConsumerGroupRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the consumer group whose configuration to return. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the consumer group whose configuration to return. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the consumer group whose configuration to return. + * `projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}`. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/GetTopicRequest.php b/ManagedKafka/src/V1/GetTopicRequest.php new file mode 100644 index 000000000000..49f1ad997717 --- /dev/null +++ b/ManagedKafka/src/V1/GetTopicRequest.php @@ -0,0 +1,91 @@ +google.cloud.managedkafka.v1.GetTopicRequest + */ +class GetTopicRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The name of the topic whose configuration to return. Structured + * like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. 
+ * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $name = ''; + + /** + * @param string $name Required. The name of the topic whose configuration to return. Structured + * like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. Please see + * {@see ManagedKafkaClient::topicName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\GetTopicRequest + * + * @experimental + */ + public static function build(string $name): self + { + return (new self()) + ->setName($name); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Required. The name of the topic whose configuration to return. Structured + * like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The name of the topic whose configuration to return. Structured + * like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Required. The name of the topic whose configuration to return. Structured + * like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListClustersRequest.php b/ManagedKafka/src/V1/ListClustersRequest.php new file mode 100644 index 000000000000..aa5222147449 --- /dev/null +++ b/ManagedKafka/src/V1/ListClustersRequest.php @@ -0,0 +1,242 @@ +google.cloud.managedkafka.v1.ListClustersRequest + */ +class ListClustersRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The parent location whose clusters are to be listed. Structured + * like `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $parent = ''; + /** + * Optional. The maximum number of clusters to return. The service may return + * fewer than this value. If unspecified, server will pick an appropriate + * default. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_size = 0; + /** + * Optional. A page token, received from a previous `ListClusters` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListClusters` must match + * the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_token = ''; + /** + * Optional. Filter expression for the result. + * + * Generated from protobuf field string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $filter = ''; + /** + * Optional. 
Order by fields for the result. + * + * Generated from protobuf field string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $order_by = ''; + + /** + * @param string $parent Required. The parent location whose clusters are to be listed. Structured + * like `projects/{project}/locations/{location}`. Please see + * {@see ManagedKafkaClient::locationName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\ListClustersRequest + * + * @experimental + */ + public static function build(string $parent): self + { + return (new self()) + ->setParent($parent); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $parent + * Required. The parent location whose clusters are to be listed. Structured + * like `projects/{project}/locations/{location}`. + * @type int $page_size + * Optional. The maximum number of clusters to return. The service may return + * fewer than this value. If unspecified, server will pick an appropriate + * default. + * @type string $page_token + * Optional. A page token, received from a previous `ListClusters` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListClusters` must match + * the call that provided the page token. + * @type string $filter + * Optional. Filter expression for the result. + * @type string $order_by + * Optional. Order by fields for the result. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The parent location whose clusters are to be listed. Structured + * like `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getParent() + { + return $this->parent; + } + + /** + * Required. The parent location whose clusters are to be listed. Structured + * like `projects/{project}/locations/{location}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setParent($var) + { + GPBUtil::checkString($var, True); + $this->parent = $var; + + return $this; + } + + /** + * Optional. The maximum number of clusters to return. The service may return + * fewer than this value. If unspecified, server will pick an appropriate + * default. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @return int + */ + public function getPageSize() + { + return $this->page_size; + } + + /** + * Optional. The maximum number of clusters to return. The service may return + * fewer than this value. If unspecified, server will pick an appropriate + * default. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @param int $var + * @return $this + */ + public function setPageSize($var) + { + GPBUtil::checkInt32($var); + $this->page_size = $var; + + return $this; + } + + /** + * Optional. A page token, received from a previous `ListClusters` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListClusters` must match + * the call that provided the page token. 
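+ * As an illustrative sketch (placeholder variables, not part of the generated
+ * file), the token normally comes from the `next_page_token` of the previous
+ * `ListClustersResponse`:
+ *
+ *     $nextRequest = (new ListClustersRequest())
+ *         ->setParent($parent)
+ *         ->setPageToken($previousResponse->getNextPageToken());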
+ * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getPageToken() + { + return $this->page_token; + } + + /** + * Optional. A page token, received from a previous `ListClusters` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListClusters` must match + * the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setPageToken($var) + { + GPBUtil::checkString($var, True); + $this->page_token = $var; + + return $this; + } + + /** + * Optional. Filter expression for the result. + * + * Generated from protobuf field string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getFilter() + { + return $this->filter; + } + + /** + * Optional. Filter expression for the result. + * + * Generated from protobuf field string filter = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setFilter($var) + { + GPBUtil::checkString($var, True); + $this->filter = $var; + + return $this; + } + + /** + * Optional. Order by fields for the result. + * + * Generated from protobuf field string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getOrderBy() + { + return $this->order_by; + } + + /** + * Optional. Order by fields for the result. + * + * Generated from protobuf field string order_by = 5 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setOrderBy($var) + { + GPBUtil::checkString($var, True); + $this->order_by = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListClustersResponse.php b/ManagedKafka/src/V1/ListClustersResponse.php new file mode 100644 index 000000000000..f02d1357fbc3 --- /dev/null +++ b/ManagedKafka/src/V1/ListClustersResponse.php @@ -0,0 +1,139 @@ +google.cloud.managedkafka.v1.ListClustersResponse + */ +class ListClustersResponse extends \Google\Protobuf\Internal\Message +{ + /** + * The list of Clusters in the requested parent. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Cluster clusters = 1; + */ + private $clusters; + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + */ + protected $next_page_token = ''; + /** + * Locations that could not be reached. + * + * Generated from protobuf field repeated string unreachable = 3; + */ + private $unreachable; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type array<\Google\Cloud\ManagedKafka\V1\Cluster>|\Google\Protobuf\Internal\RepeatedField $clusters + * The list of Clusters in the requested parent. + * @type string $next_page_token + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * @type array|\Google\Protobuf\Internal\RepeatedField $unreachable + * Locations that could not be reached. 
+ * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * The list of Clusters in the requested parent. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Cluster clusters = 1; + * @return \Google\Protobuf\Internal\RepeatedField + */ + public function getClusters() + { + return $this->clusters; + } + + /** + * The list of Clusters in the requested parent. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Cluster clusters = 1; + * @param array<\Google\Cloud\ManagedKafka\V1\Cluster>|\Google\Protobuf\Internal\RepeatedField $var + * @return $this + */ + public function setClusters($var) + { + $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\Cluster::class); + $this->clusters = $arr; + + return $this; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @return string + */ + public function getNextPageToken() + { + return $this->next_page_token; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @param string $var + * @return $this + */ + public function setNextPageToken($var) + { + GPBUtil::checkString($var, True); + $this->next_page_token = $var; + + return $this; + } + + /** + * Locations that could not be reached. + * + * Generated from protobuf field repeated string unreachable = 3; + * @return \Google\Protobuf\Internal\RepeatedField + */ + public function getUnreachable() + { + return $this->unreachable; + } + + /** + * Locations that could not be reached. + * + * Generated from protobuf field repeated string unreachable = 3; + * @param array|\Google\Protobuf\Internal\RepeatedField $var + * @return $this + */ + public function setUnreachable($var) + { + $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING); + $this->unreachable = $arr; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListConsumerGroupsRequest.php b/ManagedKafka/src/V1/ListConsumerGroupsRequest.php new file mode 100644 index 000000000000..d201e3bc513c --- /dev/null +++ b/ManagedKafka/src/V1/ListConsumerGroupsRequest.php @@ -0,0 +1,179 @@ +google.cloud.managedkafka.v1.ListConsumerGroupsRequest + */ +class ListConsumerGroupsRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The parent cluster whose consumer groups are to be listed. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $parent = ''; + /** + * Optional. The maximum number of consumer groups to return. The service may + * return fewer than this value. If unset or zero, all consumer groups for the + * parent is returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_size = 0; + /** + * Optional. A page token, received from a previous `ListConsumerGroups` call. + * Provide this to retrieve the subsequent page. 
+ * When paginating, all other parameters provided to `ListConsumerGroups` must + * match the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_token = ''; + + /** + * @param string $parent Required. The parent cluster whose consumer groups are to be listed. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. Please see + * {@see ManagedKafkaClient::clusterName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\ListConsumerGroupsRequest + * + * @experimental + */ + public static function build(string $parent): self + { + return (new self()) + ->setParent($parent); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $parent + * Required. The parent cluster whose consumer groups are to be listed. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * @type int $page_size + * Optional. The maximum number of consumer groups to return. The service may + * return fewer than this value. If unset or zero, all consumer groups for the + * parent is returned. + * @type string $page_token + * Optional. A page token, received from a previous `ListConsumerGroups` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListConsumerGroups` must + * match the call that provided the page token. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The parent cluster whose consumer groups are to be listed. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getParent() + { + return $this->parent; + } + + /** + * Required. The parent cluster whose consumer groups are to be listed. + * Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setParent($var) + { + GPBUtil::checkString($var, True); + $this->parent = $var; + + return $this; + } + + /** + * Optional. The maximum number of consumer groups to return. The service may + * return fewer than this value. If unset or zero, all consumer groups for the + * parent is returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @return int + */ + public function getPageSize() + { + return $this->page_size; + } + + /** + * Optional. The maximum number of consumer groups to return. The service may + * return fewer than this value. If unset or zero, all consumer groups for the + * parent is returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @param int $var + * @return $this + */ + public function setPageSize($var) + { + GPBUtil::checkInt32($var); + $this->page_size = $var; + + return $this; + } + + /** + * Optional. A page token, received from a previous `ListConsumerGroups` call. + * Provide this to retrieve the subsequent page. 
+ * When paginating, all other parameters provided to `ListConsumerGroups` must + * match the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getPageToken() + { + return $this->page_token; + } + + /** + * Optional. A page token, received from a previous `ListConsumerGroups` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListConsumerGroups` must + * match the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setPageToken($var) + { + GPBUtil::checkString($var, True); + $this->page_token = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListConsumerGroupsResponse.php b/ManagedKafka/src/V1/ListConsumerGroupsResponse.php new file mode 100644 index 000000000000..abcaff0766b1 --- /dev/null +++ b/ManagedKafka/src/V1/ListConsumerGroupsResponse.php @@ -0,0 +1,109 @@ +google.cloud.managedkafka.v1.ListConsumerGroupsResponse + */ +class ListConsumerGroupsResponse extends \Google\Protobuf\Internal\Message +{ + /** + * The list of consumer group in the requested parent. The order of the + * consumer groups is unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.ConsumerGroup consumer_groups = 1; + */ + private $consumer_groups; + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + */ + protected $next_page_token = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type array<\Google\Cloud\ManagedKafka\V1\ConsumerGroup>|\Google\Protobuf\Internal\RepeatedField $consumer_groups + * The list of consumer group in the requested parent. The order of the + * consumer groups is unspecified. + * @type string $next_page_token + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * The list of consumer group in the requested parent. The order of the + * consumer groups is unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.ConsumerGroup consumer_groups = 1; + * @return \Google\Protobuf\Internal\RepeatedField + */ + public function getConsumerGroups() + { + return $this->consumer_groups; + } + + /** + * The list of consumer group in the requested parent. The order of the + * consumer groups is unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.ConsumerGroup consumer_groups = 1; + * @param array<\Google\Cloud\ManagedKafka\V1\ConsumerGroup>|\Google\Protobuf\Internal\RepeatedField $var + * @return $this + */ + public function setConsumerGroups($var) + { + $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\ConsumerGroup::class); + $this->consumer_groups = $arr; + + return $this; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. 
If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @return string + */ + public function getNextPageToken() + { + return $this->next_page_token; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @param string $var + * @return $this + */ + public function setNextPageToken($var) + { + GPBUtil::checkString($var, True); + $this->next_page_token = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListTopicsRequest.php b/ManagedKafka/src/V1/ListTopicsRequest.php new file mode 100644 index 000000000000..cae01297e741 --- /dev/null +++ b/ManagedKafka/src/V1/ListTopicsRequest.php @@ -0,0 +1,174 @@ +google.cloud.managedkafka.v1.ListTopicsRequest + */ +class ListTopicsRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. The parent cluster whose topics are to be listed. Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + */ + protected $parent = ''; + /** + * Optional. The maximum number of topics to return. The service may return + * fewer than this value. If unset or zero, all topics for the parent is + * returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_size = 0; + /** + * Optional. A page token, received from a previous `ListTopics` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListTopics` must match + * the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $page_token = ''; + + /** + * @param string $parent Required. The parent cluster whose topics are to be listed. Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. Please see + * {@see ManagedKafkaClient::clusterName()} for help formatting this field. + * + * @return \Google\Cloud\ManagedKafka\V1\ListTopicsRequest + * + * @experimental + */ + public static function build(string $parent): self + { + return (new self()) + ->setParent($parent); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $parent + * Required. The parent cluster whose topics are to be listed. Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * @type int $page_size + * Optional. The maximum number of topics to return. The service may return + * fewer than this value. If unset or zero, all topics for the parent is + * returned. + * @type string $page_token + * Optional. A page token, received from a previous `ListTopics` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListTopics` must match + * the call that provided the page token. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. The parent cluster whose topics are to be listed. Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. 
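+ * As an illustrative sketch (placeholder IDs; the array keys follow the
+ * constructor documentation above), a request can also be populated directly
+ * from an array:
+ *
+ *     $request = new ListTopicsRequest([
+ *         'parent' => ManagedKafkaClient::clusterName('my-project', 'us-central1', 'my-cluster'),
+ *         'page_size' => 10,
+ *     ]);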
+ * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @return string + */ + public function getParent() + { + return $this->parent; + } + + /** + * Required. The parent cluster whose topics are to be listed. Structured like + * `projects/{project}/locations/{location}/clusters/{cluster}`. + * + * Generated from protobuf field string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { + * @param string $var + * @return $this + */ + public function setParent($var) + { + GPBUtil::checkString($var, True); + $this->parent = $var; + + return $this; + } + + /** + * Optional. The maximum number of topics to return. The service may return + * fewer than this value. If unset or zero, all topics for the parent is + * returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @return int + */ + public function getPageSize() + { + return $this->page_size; + } + + /** + * Optional. The maximum number of topics to return. The service may return + * fewer than this value. If unset or zero, all topics for the parent is + * returned. + * + * Generated from protobuf field int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * @param int $var + * @return $this + */ + public function setPageSize($var) + { + GPBUtil::checkInt32($var); + $this->page_size = $var; + + return $this; + } + + /** + * Optional. A page token, received from a previous `ListTopics` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListTopics` must match + * the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @return string + */ + public function getPageToken() + { + return $this->page_token; + } + + /** + * Optional. A page token, received from a previous `ListTopics` call. + * Provide this to retrieve the subsequent page. + * When paginating, all other parameters provided to `ListTopics` must match + * the call that provided the page token. + * + * Generated from protobuf field string page_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * @param string $var + * @return $this + */ + public function setPageToken($var) + { + GPBUtil::checkString($var, True); + $this->page_token = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/ListTopicsResponse.php b/ManagedKafka/src/V1/ListTopicsResponse.php new file mode 100644 index 000000000000..c9437db1809b --- /dev/null +++ b/ManagedKafka/src/V1/ListTopicsResponse.php @@ -0,0 +1,109 @@ +google.cloud.managedkafka.v1.ListTopicsResponse + */ +class ListTopicsResponse extends \Google\Protobuf\Internal\Message +{ + /** + * The list of topics in the requested parent. The order of the topics is + * unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Topic topics = 1; + */ + private $topics; + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + */ + protected $next_page_token = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type array<\Google\Cloud\ManagedKafka\V1\Topic>|\Google\Protobuf\Internal\RepeatedField $topics + * The list of topics in the requested parent. 
The order of the topics is + * unspecified. + * @type string $next_page_token + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * The list of topics in the requested parent. The order of the topics is + * unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Topic topics = 1; + * @return \Google\Protobuf\Internal\RepeatedField + */ + public function getTopics() + { + return $this->topics; + } + + /** + * The list of topics in the requested parent. The order of the topics is + * unspecified. + * + * Generated from protobuf field repeated .google.cloud.managedkafka.v1.Topic topics = 1; + * @param array<\Google\Cloud\ManagedKafka\V1\Topic>|\Google\Protobuf\Internal\RepeatedField $var + * @return $this + */ + public function setTopics($var) + { + $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::MESSAGE, \Google\Cloud\ManagedKafka\V1\Topic::class); + $this->topics = $arr; + + return $this; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @return string + */ + public function getNextPageToken() + { + return $this->next_page_token; + } + + /** + * A token that can be sent as `page_token` to retrieve the next page of + * results. If this field is omitted, there are no more results. + * + * Generated from protobuf field string next_page_token = 2; + * @param string $var + * @return $this + */ + public function setNextPageToken($var) + { + GPBUtil::checkString($var, True); + $this->next_page_token = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/NetworkConfig.php b/ManagedKafka/src/V1/NetworkConfig.php new file mode 100644 index 000000000000..c0efa7fdf8ad --- /dev/null +++ b/ManagedKafka/src/V1/NetworkConfig.php @@ -0,0 +1,96 @@ +google.cloud.managedkafka.v1.NetworkConfig + */ +class NetworkConfig extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Name of the VPC subnet in which to create Private Service Connect + * (PSC) endpoints for the Kafka brokers and bootstrap address. Structured + * like: projects/{project}/regions/{region}/subnetworks/{subnet_id} + * The subnet must be located in the same region as the Kafka cluster. The + * project may differ. Multiple subnets from the same parent network must not + * be specified. + * The CIDR range of the subnet must be within the IPv4 address ranges for + * private networks, as specified in RFC 1918. + * + * Generated from protobuf field string subnet = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $subnet = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $subnet + * Required. Name of the VPC subnet in which to create Private Service Connect + * (PSC) endpoints for the Kafka brokers and bootstrap address. Structured + * like: projects/{project}/regions/{region}/subnetworks/{subnet_id} + * The subnet must be located in the same region as the Kafka cluster. The + * project may differ. Multiple subnets from the same parent network must not + * be specified. 
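+ * As an illustrative sketch (placeholder project, region and subnet IDs):
+ *
+ *     $networkConfig = new NetworkConfig([
+ *         'subnet' => 'projects/my-project/regions/us-central1/subnetworks/my-subnet',
+ *     ]);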
+ * The CIDR range of the subnet must be within the IPv4 address ranges for + * private networks, as specified in RFC 1918. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Required. Name of the VPC subnet in which to create Private Service Connect + * (PSC) endpoints for the Kafka brokers and bootstrap address. Structured + * like: projects/{project}/regions/{region}/subnetworks/{subnet_id} + * The subnet must be located in the same region as the Kafka cluster. The + * project may differ. Multiple subnets from the same parent network must not + * be specified. + * The CIDR range of the subnet must be within the IPv4 address ranges for + * private networks, as specified in RFC 1918. + * + * Generated from protobuf field string subnet = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return string + */ + public function getSubnet() + { + return $this->subnet; + } + + /** + * Required. Name of the VPC subnet in which to create Private Service Connect + * (PSC) endpoints for the Kafka brokers and bootstrap address. Structured + * like: projects/{project}/regions/{region}/subnetworks/{subnet_id} + * The subnet must be located in the same region as the Kafka cluster. The + * project may differ. Multiple subnets from the same parent network must not + * be specified. + * The CIDR range of the subnet must be within the IPv4 address ranges for + * private networks, as specified in RFC 1918. + * + * Generated from protobuf field string subnet = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param string $var + * @return $this + */ + public function setSubnet($var) + { + GPBUtil::checkString($var, True); + $this->subnet = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/OperationMetadata.php b/ManagedKafka/src/V1/OperationMetadata.php new file mode 100644 index 000000000000..ebfd69902bdd --- /dev/null +++ b/ManagedKafka/src/V1/OperationMetadata.php @@ -0,0 +1,307 @@ +google.cloud.managedkafka.v1.OperationMetadata + */ +class OperationMetadata extends \Google\Protobuf\Internal\Message +{ + /** + * Output only. The time the operation was created. + * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $create_time = null; + /** + * Output only. The time the operation finished running. + * + * Generated from protobuf field .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $end_time = null; + /** + * Output only. Server-defined resource path for the target of the operation. + * + * Generated from protobuf field string target = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $target = ''; + /** + * Output only. Name of the verb executed by the operation. + * + * Generated from protobuf field string verb = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $verb = ''; + /** + * Output only. Human-readable status of the operation, if any. + * + * Generated from protobuf field string status_message = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $status_message = ''; + /** + * Output only. Identifies whether the user has requested cancellation + * of the operation. Operations that have been cancelled successfully + * have [Operation.error][] value with a + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. 
+ * + * Generated from protobuf field bool requested_cancellation = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $requested_cancellation = false; + /** + * Output only. API version used to start the operation. + * + * Generated from protobuf field string api_version = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + */ + protected $api_version = ''; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Protobuf\Timestamp $create_time + * Output only. The time the operation was created. + * @type \Google\Protobuf\Timestamp $end_time + * Output only. The time the operation finished running. + * @type string $target + * Output only. Server-defined resource path for the target of the operation. + * @type string $verb + * Output only. Name of the verb executed by the operation. + * @type string $status_message + * Output only. Human-readable status of the operation, if any. + * @type bool $requested_cancellation + * Output only. Identifies whether the user has requested cancellation + * of the operation. Operations that have been cancelled successfully + * have [Operation.error][] value with a + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. + * @type string $api_version + * Output only. API version used to start the operation. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Output only. The time the operation was created. + * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return \Google\Protobuf\Timestamp|null + */ + public function getCreateTime() + { + return $this->create_time; + } + + public function hasCreateTime() + { + return isset($this->create_time); + } + + public function clearCreateTime() + { + unset($this->create_time); + } + + /** + * Output only. The time the operation was created. + * + * Generated from protobuf field .google.protobuf.Timestamp create_time = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param \Google\Protobuf\Timestamp $var + * @return $this + */ + public function setCreateTime($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class); + $this->create_time = $var; + + return $this; + } + + /** + * Output only. The time the operation finished running. + * + * Generated from protobuf field .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return \Google\Protobuf\Timestamp|null + */ + public function getEndTime() + { + return $this->end_time; + } + + public function hasEndTime() + { + return isset($this->end_time); + } + + public function clearEndTime() + { + unset($this->end_time); + } + + /** + * Output only. The time the operation finished running. + * + * Generated from protobuf field .google.protobuf.Timestamp end_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param \Google\Protobuf\Timestamp $var + * @return $this + */ + public function setEndTime($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\Timestamp::class); + $this->end_time = $var; + + return $this; + } + + /** + * Output only. Server-defined resource path for the target of the operation. 
+ * + * Generated from protobuf field string target = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return string + */ + public function getTarget() + { + return $this->target; + } + + /** + * Output only. Server-defined resource path for the target of the operation. + * + * Generated from protobuf field string target = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param string $var + * @return $this + */ + public function setTarget($var) + { + GPBUtil::checkString($var, True); + $this->target = $var; + + return $this; + } + + /** + * Output only. Name of the verb executed by the operation. + * + * Generated from protobuf field string verb = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return string + */ + public function getVerb() + { + return $this->verb; + } + + /** + * Output only. Name of the verb executed by the operation. + * + * Generated from protobuf field string verb = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param string $var + * @return $this + */ + public function setVerb($var) + { + GPBUtil::checkString($var, True); + $this->verb = $var; + + return $this; + } + + /** + * Output only. Human-readable status of the operation, if any. + * + * Generated from protobuf field string status_message = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return string + */ + public function getStatusMessage() + { + return $this->status_message; + } + + /** + * Output only. Human-readable status of the operation, if any. + * + * Generated from protobuf field string status_message = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param string $var + * @return $this + */ + public function setStatusMessage($var) + { + GPBUtil::checkString($var, True); + $this->status_message = $var; + + return $this; + } + + /** + * Output only. Identifies whether the user has requested cancellation + * of the operation. Operations that have been cancelled successfully + * have [Operation.error][] value with a + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. + * + * Generated from protobuf field bool requested_cancellation = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return bool + */ + public function getRequestedCancellation() + { + return $this->requested_cancellation; + } + + /** + * Output only. Identifies whether the user has requested cancellation + * of the operation. Operations that have been cancelled successfully + * have [Operation.error][] value with a + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. + * + * Generated from protobuf field bool requested_cancellation = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param bool $var + * @return $this + */ + public function setRequestedCancellation($var) + { + GPBUtil::checkBool($var); + $this->requested_cancellation = $var; + + return $this; + } + + /** + * Output only. API version used to start the operation. + * + * Generated from protobuf field string api_version = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @return string + */ + public function getApiVersion() + { + return $this->api_version; + } + + /** + * Output only. API version used to start the operation. 
+ * + * Generated from protobuf field string api_version = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * @param string $var + * @return $this + */ + public function setApiVersion($var) + { + GPBUtil::checkString($var, True); + $this->api_version = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/RebalanceConfig.php b/ManagedKafka/src/V1/RebalanceConfig.php new file mode 100644 index 000000000000..b13164db93f7 --- /dev/null +++ b/ManagedKafka/src/V1/RebalanceConfig.php @@ -0,0 +1,71 @@ +google.cloud.managedkafka.v1.RebalanceConfig + */ +class RebalanceConfig extends \Google\Protobuf\Internal\Message +{ + /** + * Optional. The rebalance behavior for the cluster. + * When not specified, defaults to `NO_REBALANCE`. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + */ + protected $mode = 0; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type int $mode + * Optional. The rebalance behavior for the cluster. + * When not specified, defaults to `NO_REBALANCE`. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Optional. The rebalance behavior for the cluster. + * When not specified, defaults to `NO_REBALANCE`. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * @return int + */ + public function getMode() + { + return $this->mode; + } + + /** + * Optional. The rebalance behavior for the cluster. + * When not specified, defaults to `NO_REBALANCE`. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.RebalanceConfig.Mode mode = 1 [(.google.api.field_behavior) = OPTIONAL]; + * @param int $var + * @return $this + */ + public function setMode($var) + { + GPBUtil::checkEnum($var, \Google\Cloud\ManagedKafka\V1\RebalanceConfig\Mode::class); + $this->mode = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/RebalanceConfig/Mode.php b/ManagedKafka/src/V1/RebalanceConfig/Mode.php new file mode 100644 index 000000000000..a30fddfc8651 --- /dev/null +++ b/ManagedKafka/src/V1/RebalanceConfig/Mode.php @@ -0,0 +1,63 @@ +google.cloud.managedkafka.v1.RebalanceConfig.Mode + */ +class Mode +{ + /** + * A mode was not specified. Do not use. + * + * Generated from protobuf enum MODE_UNSPECIFIED = 0; + */ + const MODE_UNSPECIFIED = 0; + /** + * Do not rebalance automatically. + * + * Generated from protobuf enum NO_REBALANCE = 1; + */ + const NO_REBALANCE = 1; + /** + * Automatically rebalance topic partitions among brokers when the + * cluster is scaled up. + * + * Generated from protobuf enum AUTO_REBALANCE_ON_SCALE_UP = 2; + */ + const AUTO_REBALANCE_ON_SCALE_UP = 2; + + private static $valueToName = [ + self::MODE_UNSPECIFIED => 'MODE_UNSPECIFIED', + self::NO_REBALANCE => 'NO_REBALANCE', + self::AUTO_REBALANCE_ON_SCALE_UP => 'AUTO_REBALANCE_ON_SCALE_UP', + ]; + + public static function name($value) + { + if (!isset(self::$valueToName[$value])) { + throw new UnexpectedValueException(sprintf( + 'Enum %s has no name defined for value %s', __CLASS__, $value)); + } + return self::$valueToName[$value]; + } + + + public static function value($name) + { + $const = __CLASS__ . '::' . 
strtoupper($name); + if (!defined($const)) { + throw new UnexpectedValueException(sprintf( + 'Enum %s has no value defined for name %s', __CLASS__, $name)); + } + return constant($const); + } +} + + diff --git a/ManagedKafka/src/V1/Topic.php b/ManagedKafka/src/V1/Topic.php new file mode 100644 index 000000000000..da04d6f0f968 --- /dev/null +++ b/ManagedKafka/src/V1/Topic.php @@ -0,0 +1,201 @@ +google.cloud.managedkafka.v1.Topic + */ +class Topic extends \Google\Protobuf\Internal\Message +{ + /** + * Identifier. The name of the topic. The `topic` segment is used when + * connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + */ + protected $name = ''; + /** + * Required. The number of partitions this topic has. The partition count can + * only be increased, not decreased. Please note that if partitions are + * increased for a topic that has a key, the partitioning logic or the + * ordering of the messages will be affected. + * + * Generated from protobuf field int32 partition_count = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $partition_count = 0; + /** + * Required. Immutable. The number of replicas of each partition. A + * replication factor of 3 is recommended for high availability. + * + * Generated from protobuf field int32 replication_factor = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + */ + protected $replication_factor = 0; + /** + * Optional. Configurations for the topic that are overridden from the cluster + * defaults. The key of the map is a Kafka topic property name, for example: + * `cleanup.policy`, `compression.type`. + * + * Generated from protobuf field map configs = 4 [(.google.api.field_behavior) = OPTIONAL]; + */ + private $configs; + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type string $name + * Identifier. The name of the topic. The `topic` segment is used when + * connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * @type int $partition_count + * Required. The number of partitions this topic has. The partition count can + * only be increased, not decreased. Please note that if partitions are + * increased for a topic that has a key, the partitioning logic or the + * ordering of the messages will be affected. + * @type int $replication_factor + * Required. Immutable. The number of replicas of each partition. A + * replication factor of 3 is recommended for high availability. + * @type array|\Google\Protobuf\Internal\MapField $configs + * Optional. Configurations for the topic that are overridden from the cluster + * defaults. The key of the map is a Kafka topic property name, for example: + * `cleanup.policy`, `compression.type`. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\Resources::initOnce(); + parent::__construct($data); + } + + /** + * Identifier. The name of the topic. The `topic` segment is used when + * connecting directly to the cluster. 
Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @return string + */ + public function getName() + { + return $this->name; + } + + /** + * Identifier. The name of the topic. The `topic` segment is used when + * connecting directly to the cluster. Structured like: + * projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + * + * Generated from protobuf field string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * @param string $var + * @return $this + */ + public function setName($var) + { + GPBUtil::checkString($var, True); + $this->name = $var; + + return $this; + } + + /** + * Required. The number of partitions this topic has. The partition count can + * only be increased, not decreased. Please note that if partitions are + * increased for a topic that has a key, the partitioning logic or the + * ordering of the messages will be affected. + * + * Generated from protobuf field int32 partition_count = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return int + */ + public function getPartitionCount() + { + return $this->partition_count; + } + + /** + * Required. The number of partitions this topic has. The partition count can + * only be increased, not decreased. Please note that if partitions are + * increased for a topic that has a key, the partitioning logic or the + * ordering of the messages will be affected. + * + * Generated from protobuf field int32 partition_count = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param int $var + * @return $this + */ + public function setPartitionCount($var) + { + GPBUtil::checkInt32($var); + $this->partition_count = $var; + + return $this; + } + + /** + * Required. Immutable. The number of replicas of each partition. A + * replication factor of 3 is recommended for high availability. + * + * Generated from protobuf field int32 replication_factor = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * @return int + */ + public function getReplicationFactor() + { + return $this->replication_factor; + } + + /** + * Required. Immutable. The number of replicas of each partition. A + * replication factor of 3 is recommended for high availability. + * + * Generated from protobuf field int32 replication_factor = 3 [(.google.api.field_behavior) = REQUIRED, (.google.api.field_behavior) = IMMUTABLE]; + * @param int $var + * @return $this + */ + public function setReplicationFactor($var) + { + GPBUtil::checkInt32($var); + $this->replication_factor = $var; + + return $this; + } + + /** + * Optional. Configurations for the topic that are overridden from the cluster + * defaults. The key of the map is a Kafka topic property name, for example: + * `cleanup.policy`, `compression.type`. + * + * Generated from protobuf field map configs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @return \Google\Protobuf\Internal\MapField + */ + public function getConfigs() + { + return $this->configs; + } + + /** + * Optional. Configurations for the topic that are overridden from the cluster + * defaults. The key of the map is a Kafka topic property name, for example: + * `cleanup.policy`, `compression.type`. 
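+ * An illustrative override map (hypothetical values only) would be
+ * ['cleanup.policy' => 'compact', 'compression.type' => 'producer'].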
+ * + * Generated from protobuf field map configs = 4 [(.google.api.field_behavior) = OPTIONAL]; + * @param array|\Google\Protobuf\Internal\MapField $var + * @return $this + */ + public function setConfigs($var) + { + $arr = GPBUtil::checkMapField($var, \Google\Protobuf\Internal\GPBType::STRING, \Google\Protobuf\Internal\GPBType::STRING); + $this->configs = $arr; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/UpdateClusterRequest.php b/ManagedKafka/src/V1/UpdateClusterRequest.php new file mode 100644 index 000000000000..93b64c314b65 --- /dev/null +++ b/ManagedKafka/src/V1/UpdateClusterRequest.php @@ -0,0 +1,234 @@ +google.cloud.managedkafka.v1.UpdateClusterRequest + */ +class UpdateClusterRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * cluster resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $update_mask = null; + /** + * Required. The cluster to update. Its `name` field must be populated. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $cluster = null; + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + */ + protected $request_id = ''; + + /** + * @param \Google\Cloud\ManagedKafka\V1\Cluster $cluster Required. The cluster to update. Its `name` field must be populated. + * @param \Google\Protobuf\FieldMask $updateMask Required. Field mask is used to specify the fields to be overwritten in the + * cluster resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * @return \Google\Cloud\ManagedKafka\V1\UpdateClusterRequest + * + * @experimental + */ + public static function build(\Google\Cloud\ManagedKafka\V1\Cluster $cluster, \Google\Protobuf\FieldMask $updateMask): self + { + return (new self()) + ->setCluster($cluster) + ->setUpdateMask($updateMask); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Protobuf\FieldMask $update_mask + * Required. 
Field mask is used to specify the fields to be overwritten in the + * cluster resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * @type \Google\Cloud\ManagedKafka\V1\Cluster $cluster + * Required. The cluster to update. Its `name` field must be populated. + * @type string $request_id + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * cluster resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Protobuf\FieldMask|null + */ + public function getUpdateMask() + { + return $this->update_mask; + } + + public function hasUpdateMask() + { + return isset($this->update_mask); + } + + public function clearUpdateMask() + { + unset($this->update_mask); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * cluster resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Protobuf\FieldMask $var + * @return $this + */ + public function setUpdateMask($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\FieldMask::class); + $this->update_mask = $var; + + return $this; + } + + /** + * Required. The cluster to update. Its `name` field must be populated. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\Cluster|null + */ + public function getCluster() + { + return $this->cluster; + } + + public function hasCluster() + { + return isset($this->cluster); + } + + public function clearCluster() + { + unset($this->cluster); + } + + /** + * Required. The cluster to update. Its `name` field must be populated. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.Cluster cluster = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\Cluster $var + * @return $this + */ + public function setCluster($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\Cluster::class); + $this->cluster = $var; + + return $this; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @return string + */ + public function getRequestId() + { + return $this->request_id; + } + + /** + * Optional. An optional request ID to identify requests. Specify a unique + * request ID to avoid duplication of requests. If a request times out or + * fails, retrying with the same ID allows the server to recognize the + * previous attempt. For at least 60 minutes, the server ignores duplicate + * requests bearing the same ID. + * For example, consider a situation where you make an initial request and the + * request times out. If you make the request again with the same request ID + * within 60 minutes of the last request, the server checks if an original + * operation with the same request ID was received. If so, the server ignores + * the second request. + * The request ID must be a valid UUID. A zero UUID is not supported + * (00000000-0000-0000-0000-000000000000). + * + * Generated from protobuf field string request_id = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.field_info) = { + * @param string $var + * @return $this + */ + public function setRequestId($var) + { + GPBUtil::checkString($var, True); + $this->request_id = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/UpdateConsumerGroupRequest.php b/ManagedKafka/src/V1/UpdateConsumerGroupRequest.php new file mode 100644 index 000000000000..dcdd3471a832 --- /dev/null +++ b/ManagedKafka/src/V1/UpdateConsumerGroupRequest.php @@ -0,0 +1,156 @@ +google.cloud.managedkafka.v1.UpdateConsumerGroupRequest + */ +class UpdateConsumerGroupRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * ConsumerGroup resource by the update. + * The fields specified in the update_mask are relative to the resource, not + * the full request. A field will be overwritten if it is in the mask. The + * mask is required and a value of * will update all fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $update_mask = null; + /** + * Required. The consumer group to update. Its `name` field must be populated. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.ConsumerGroup consumer_group = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $consumer_group = null; + + /** + * @param \Google\Cloud\ManagedKafka\V1\ConsumerGroup $consumerGroup Required. The consumer group to update. Its `name` field must be populated. + * @param \Google\Protobuf\FieldMask $updateMask Required. Field mask is used to specify the fields to be overwritten in the + * ConsumerGroup resource by the update. + * The fields specified in the update_mask are relative to the resource, not + * the full request. A field will be overwritten if it is in the mask. The + * mask is required and a value of * will update all fields. + * + * @return \Google\Cloud\ManagedKafka\V1\UpdateConsumerGroupRequest + * + * @experimental + */ + public static function build(\Google\Cloud\ManagedKafka\V1\ConsumerGroup $consumerGroup, \Google\Protobuf\FieldMask $updateMask): self + { + return (new self()) + ->setConsumerGroup($consumerGroup) + ->setUpdateMask($updateMask); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Protobuf\FieldMask $update_mask + * Required. Field mask is used to specify the fields to be overwritten in the + * ConsumerGroup resource by the update. + * The fields specified in the update_mask are relative to the resource, not + * the full request. A field will be overwritten if it is in the mask. The + * mask is required and a value of * will update all fields. + * @type \Google\Cloud\ManagedKafka\V1\ConsumerGroup $consumer_group + * Required. The consumer group to update. Its `name` field must be populated. + * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * ConsumerGroup resource by the update. + * The fields specified in the update_mask are relative to the resource, not + * the full request. A field will be overwritten if it is in the mask. The + * mask is required and a value of * will update all fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Protobuf\FieldMask|null + */ + public function getUpdateMask() + { + return $this->update_mask; + } + + public function hasUpdateMask() + { + return isset($this->update_mask); + } + + public function clearUpdateMask() + { + unset($this->update_mask); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * ConsumerGroup resource by the update. + * The fields specified in the update_mask are relative to the resource, not + * the full request. A field will be overwritten if it is in the mask. The + * mask is required and a value of * will update all fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Protobuf\FieldMask $var + * @return $this + */ + public function setUpdateMask($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\FieldMask::class); + $this->update_mask = $var; + + return $this; + } + + /** + * Required. The consumer group to update. Its `name` field must be populated. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.ConsumerGroup consumer_group = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\ConsumerGroup|null + */ + public function getConsumerGroup() + { + return $this->consumer_group; + } + + public function hasConsumerGroup() + { + return isset($this->consumer_group); + } + + public function clearConsumerGroup() + { + unset($this->consumer_group); + } + + /** + * Required. The consumer group to update. Its `name` field must be populated. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.ConsumerGroup consumer_group = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\ConsumerGroup $var + * @return $this + */ + public function setConsumerGroup($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\ConsumerGroup::class); + $this->consumer_group = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/UpdateTopicRequest.php b/ManagedKafka/src/V1/UpdateTopicRequest.php new file mode 100644 index 000000000000..3e2a1f8792eb --- /dev/null +++ b/ManagedKafka/src/V1/UpdateTopicRequest.php @@ -0,0 +1,156 @@ +google.cloud.managedkafka.v1.UpdateTopicRequest + */ +class UpdateTopicRequest extends \Google\Protobuf\Internal\Message +{ + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * Topic resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $update_mask = null; + /** + * Required. The topic to update. Its `name` field must be populated. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 2 [(.google.api.field_behavior) = REQUIRED]; + */ + protected $topic = null; + + /** + * @param \Google\Cloud\ManagedKafka\V1\Topic $topic Required. The topic to update. Its `name` field must be populated. + * @param \Google\Protobuf\FieldMask $updateMask Required. Field mask is used to specify the fields to be overwritten in the + * Topic resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * @return \Google\Cloud\ManagedKafka\V1\UpdateTopicRequest + * + * @experimental + */ + public static function build(\Google\Cloud\ManagedKafka\V1\Topic $topic, \Google\Protobuf\FieldMask $updateMask): self + { + return (new self()) + ->setTopic($topic) + ->setUpdateMask($updateMask); + } + + /** + * Constructor. + * + * @param array $data { + * Optional. Data for populating the Message object. + * + * @type \Google\Protobuf\FieldMask $update_mask + * Required. Field mask is used to specify the fields to be overwritten in the + * Topic resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * @type \Google\Cloud\ManagedKafka\V1\Topic $topic + * Required. The topic to update. Its `name` field must be populated. 
+ * } + */ + public function __construct($data = NULL) { + \GPBMetadata\Google\Cloud\Managedkafka\V1\ManagedKafka::initOnce(); + parent::__construct($data); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * Topic resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Protobuf\FieldMask|null + */ + public function getUpdateMask() + { + return $this->update_mask; + } + + public function hasUpdateMask() + { + return isset($this->update_mask); + } + + public function clearUpdateMask() + { + unset($this->update_mask); + } + + /** + * Required. Field mask is used to specify the fields to be overwritten in the + * Topic resource by the update. The fields specified in the update_mask are + * relative to the resource, not the full request. A field will be overwritten + * if it is in the mask. The mask is required and a value of * will update all + * fields. + * + * Generated from protobuf field .google.protobuf.FieldMask update_mask = 1 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Protobuf\FieldMask $var + * @return $this + */ + public function setUpdateMask($var) + { + GPBUtil::checkMessage($var, \Google\Protobuf\FieldMask::class); + $this->update_mask = $var; + + return $this; + } + + /** + * Required. The topic to update. Its `name` field must be populated. + * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 2 [(.google.api.field_behavior) = REQUIRED]; + * @return \Google\Cloud\ManagedKafka\V1\Topic|null + */ + public function getTopic() + { + return $this->topic; + } + + public function hasTopic() + { + return isset($this->topic); + } + + public function clearTopic() + { + unset($this->topic); + } + + /** + * Required. The topic to update. Its `name` field must be populated. 
+ * + * Generated from protobuf field .google.cloud.managedkafka.v1.Topic topic = 2 [(.google.api.field_behavior) = REQUIRED]; + * @param \Google\Cloud\ManagedKafka\V1\Topic $var + * @return $this + */ + public function setTopic($var) + { + GPBUtil::checkMessage($var, \Google\Cloud\ManagedKafka\V1\Topic::class); + $this->topic = $var; + + return $this; + } + +} + diff --git a/ManagedKafka/src/V1/gapic_metadata.json b/ManagedKafka/src/V1/gapic_metadata.json new file mode 100644 index 000000000000..52eab10d2dac --- /dev/null +++ b/ManagedKafka/src/V1/gapic_metadata.json @@ -0,0 +1,98 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services\/RPCs to the corresponding library clients\/methods", + "language": "php", + "protoPackage": "google.cloud.managedkafka.v1", + "libraryPackage": "Google\\Cloud\\ManagedKafka\\V1", + "services": { + "ManagedKafka": { + "clients": { + "grpc": { + "libraryClient": "ManagedKafkaGapicClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "createCluster" + ] + }, + "CreateTopic": { + "methods": [ + "createTopic" + ] + }, + "DeleteCluster": { + "methods": [ + "deleteCluster" + ] + }, + "DeleteConsumerGroup": { + "methods": [ + "deleteConsumerGroup" + ] + }, + "DeleteTopic": { + "methods": [ + "deleteTopic" + ] + }, + "GetCluster": { + "methods": [ + "getCluster" + ] + }, + "GetConsumerGroup": { + "methods": [ + "getConsumerGroup" + ] + }, + "GetTopic": { + "methods": [ + "getTopic" + ] + }, + "ListClusters": { + "methods": [ + "listClusters" + ] + }, + "ListConsumerGroups": { + "methods": [ + "listConsumerGroups" + ] + }, + "ListTopics": { + "methods": [ + "listTopics" + ] + }, + "UpdateCluster": { + "methods": [ + "updateCluster" + ] + }, + "UpdateConsumerGroup": { + "methods": [ + "updateConsumerGroup" + ] + }, + "UpdateTopic": { + "methods": [ + "updateTopic" + ] + }, + "GetLocation": { + "methods": [ + "getLocation" + ] + }, + "ListLocations": { + "methods": [ + "listLocations" + ] + } + } + } + } + } + } +} \ No newline at end of file diff --git a/ManagedKafka/src/V1/resources/managed_kafka_client_config.json b/ManagedKafka/src/V1/resources/managed_kafka_client_config.json new file mode 100644 index 000000000000..8585444e9db2 --- /dev/null +++ b/ManagedKafka/src/V1/resources/managed_kafka_client_config.json @@ -0,0 +1,124 @@ +{ + "interfaces": { + "google.cloud.managedkafka.v1.ManagedKafka": { + "retry_codes": { + "no_retry_codes": [], + "retry_policy_1_codes": [ + "UNAVAILABLE" + ], + "no_retry_1_codes": [] + }, + "retry_params": { + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0 + }, + "retry_policy_1_params": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 10000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000 + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000 + } + }, + "methods": { + "CreateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "CreateTopic": { + "timeout_millis": 60000, + "retry_codes_name": 
"no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "DeleteCluster": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "DeleteConsumerGroup": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "DeleteTopic": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "GetCluster": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "GetConsumerGroup": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "GetTopic": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "ListClusters": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "ListConsumerGroups": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "ListTopics": { + "timeout_millis": 60000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params" + }, + "UpdateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "UpdateConsumerGroup": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "UpdateTopic": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params" + }, + "GetLocation": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_codes", + "retry_params_name": "no_retry_params" + }, + "ListLocations": { + "timeout_millis": 60000, + "retry_codes_name": "no_retry_codes", + "retry_params_name": "no_retry_params" + } + } + } + } +} diff --git a/ManagedKafka/src/V1/resources/managed_kafka_descriptor_config.php b/ManagedKafka/src/V1/resources/managed_kafka_descriptor_config.php new file mode 100644 index 000000000000..37b43b976d5a --- /dev/null +++ b/ManagedKafka/src/V1/resources/managed_kafka_descriptor_config.php @@ -0,0 +1,285 @@ + [ + 'google.cloud.managedkafka.v1.ManagedKafka' => [ + 'CreateCluster' => [ + 'longRunning' => [ + 'operationReturnType' => '\Google\Cloud\ManagedKafka\V1\Cluster', + 'metadataReturnType' => '\Google\Cloud\ManagedKafka\V1\OperationMetadata', + 'initialPollDelayMillis' => '500', + 'pollDelayMultiplier' => '1.5', + 'maxPollDelayMillis' => '5000', + 'totalPollTimeoutMillis' => '300000', + ], + 'callType' => \Google\ApiCore\Call::LONGRUNNING_CALL, + 'headerParams' => [ + [ + 'keyName' => 'parent', + 'fieldAccessors' => [ + 'getParent', + ], + ], + ], + ], + 'DeleteCluster' => [ + 'longRunning' => [ + 'operationReturnType' => '\Google\Protobuf\GPBEmpty', + 'metadataReturnType' => '\Google\Cloud\ManagedKafka\V1\OperationMetadata', + 'initialPollDelayMillis' => '500', + 'pollDelayMultiplier' => '1.5', + 'maxPollDelayMillis' => '5000', + 'totalPollTimeoutMillis' => '300000', + ], + 'callType' => \Google\ApiCore\Call::LONGRUNNING_CALL, + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'UpdateCluster' => [ + 'longRunning' => [ + 'operationReturnType' => '\Google\Cloud\ManagedKafka\V1\Cluster', + 'metadataReturnType' => 
'\Google\Cloud\ManagedKafka\V1\OperationMetadata', + 'initialPollDelayMillis' => '500', + 'pollDelayMultiplier' => '1.5', + 'maxPollDelayMillis' => '5000', + 'totalPollTimeoutMillis' => '300000', + ], + 'callType' => \Google\ApiCore\Call::LONGRUNNING_CALL, + 'headerParams' => [ + [ + 'keyName' => 'cluster.name', + 'fieldAccessors' => [ + 'getCluster', + 'getName', + ], + ], + ], + ], + 'CreateTopic' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\Topic', + 'headerParams' => [ + [ + 'keyName' => 'parent', + 'fieldAccessors' => [ + 'getParent', + ], + ], + ], + ], + 'DeleteConsumerGroup' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Protobuf\GPBEmpty', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'DeleteTopic' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Protobuf\GPBEmpty', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'GetCluster' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\Cluster', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'GetConsumerGroup' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\ConsumerGroup', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'GetTopic' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\Topic', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + ], + 'ListClusters' => [ + 'pageStreaming' => [ + 'requestPageTokenGetMethod' => 'getPageToken', + 'requestPageTokenSetMethod' => 'setPageToken', + 'requestPageSizeGetMethod' => 'getPageSize', + 'requestPageSizeSetMethod' => 'setPageSize', + 'responsePageTokenGetMethod' => 'getNextPageToken', + 'resourcesGetMethod' => 'getClusters', + ], + 'callType' => \Google\ApiCore\Call::PAGINATED_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\ListClustersResponse', + 'headerParams' => [ + [ + 'keyName' => 'parent', + 'fieldAccessors' => [ + 'getParent', + ], + ], + ], + ], + 'ListConsumerGroups' => [ + 'pageStreaming' => [ + 'requestPageTokenGetMethod' => 'getPageToken', + 'requestPageTokenSetMethod' => 'setPageToken', + 'requestPageSizeGetMethod' => 'getPageSize', + 'requestPageSizeSetMethod' => 'setPageSize', + 'responsePageTokenGetMethod' => 'getNextPageToken', + 'resourcesGetMethod' => 'getConsumerGroups', + ], + 'callType' => \Google\ApiCore\Call::PAGINATED_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\ListConsumerGroupsResponse', + 'headerParams' => [ + [ + 'keyName' => 'parent', + 'fieldAccessors' => [ + 'getParent', + ], + ], + ], + ], + 'ListTopics' => [ + 'pageStreaming' => [ + 'requestPageTokenGetMethod' => 'getPageToken', + 'requestPageTokenSetMethod' => 'setPageToken', + 'requestPageSizeGetMethod' => 'getPageSize', + 'requestPageSizeSetMethod' => 'setPageSize', + 'responsePageTokenGetMethod' => 'getNextPageToken', + 'resourcesGetMethod' => 'getTopics', + ], + 'callType' => \Google\ApiCore\Call::PAGINATED_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\ListTopicsResponse', + 'headerParams' => [ + [ + 'keyName' => 'parent', + 'fieldAccessors' => [ + 'getParent', + ], + ], + ], + ], + 
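+            // Remaining entries: the unary update methods, the google.cloud.location.Locations mixin, and the resource-name template map.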
'UpdateConsumerGroup' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\ConsumerGroup', + 'headerParams' => [ + [ + 'keyName' => 'consumer_group.name', + 'fieldAccessors' => [ + 'getConsumerGroup', + 'getName', + ], + ], + ], + ], + 'UpdateTopic' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\ManagedKafka\V1\Topic', + 'headerParams' => [ + [ + 'keyName' => 'topic.name', + 'fieldAccessors' => [ + 'getTopic', + 'getName', + ], + ], + ], + ], + 'GetLocation' => [ + 'callType' => \Google\ApiCore\Call::UNARY_CALL, + 'responseType' => 'Google\Cloud\Location\Location', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + 'interfaceOverride' => 'google.cloud.location.Locations', + ], + 'ListLocations' => [ + 'pageStreaming' => [ + 'requestPageTokenGetMethod' => 'getPageToken', + 'requestPageTokenSetMethod' => 'setPageToken', + 'requestPageSizeGetMethod' => 'getPageSize', + 'requestPageSizeSetMethod' => 'setPageSize', + 'responsePageTokenGetMethod' => 'getNextPageToken', + 'resourcesGetMethod' => 'getLocations', + ], + 'callType' => \Google\ApiCore\Call::PAGINATED_CALL, + 'responseType' => 'Google\Cloud\Location\ListLocationsResponse', + 'headerParams' => [ + [ + 'keyName' => 'name', + 'fieldAccessors' => [ + 'getName', + ], + ], + ], + 'interfaceOverride' => 'google.cloud.location.Locations', + ], + 'templateMap' => [ + 'cluster' => 'projects/{project}/locations/{location}/clusters/{cluster}', + 'consumerGroup' => 'projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group}', + 'cryptoKey' => 'projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}', + 'location' => 'projects/{project}/locations/{location}', + 'topic' => 'projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}', + ], + ], + ], +]; diff --git a/ManagedKafka/src/V1/resources/managed_kafka_rest_client_config.php b/ManagedKafka/src/V1/resources/managed_kafka_rest_client_config.php new file mode 100644 index 000000000000..f6fcea12de47 --- /dev/null +++ b/ManagedKafka/src/V1/resources/managed_kafka_rest_client_config.php @@ -0,0 +1,277 @@ + [ + 'google.cloud.location.Locations' => [ + 'GetLocation' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'ListLocations' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*}/locations', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + ], + 'google.cloud.managedkafka.v1.ManagedKafka' => [ + 'CreateCluster' => [ + 'method' => 'post', + 'uriTemplate' => '/v1/{parent=projects/*/locations/*}/clusters', + 'body' => 'cluster', + 'placeholders' => [ + 'parent' => [ + 'getters' => [ + 'getParent', + ], + ], + ], + 'queryParams' => [ + 'cluster_id', + ], + ], + 'CreateTopic' => [ + 'method' => 'post', + 'uriTemplate' => '/v1/{parent=projects/*/locations/*/clusters/*}/topics', + 'body' => 'topic', + 'placeholders' => [ + 'parent' => [ + 'getters' => [ + 'getParent', + ], + ], + ], + 'queryParams' => [ + 'topic_id', + ], + ], + 'DeleteCluster' => [ + 'method' => 'delete', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/clusters/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'DeleteConsumerGroup' => [ + 'method' => 'delete', + 'uriTemplate' => 
'/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'DeleteTopic' => [ + 'method' => 'delete', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/clusters/*/topics/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'GetCluster' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/clusters/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'GetConsumerGroup' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'GetTopic' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/clusters/*/topics/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'ListClusters' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{parent=projects/*/locations/*}/clusters', + 'placeholders' => [ + 'parent' => [ + 'getters' => [ + 'getParent', + ], + ], + ], + ], + 'ListConsumerGroups' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{parent=projects/*/locations/*/clusters/*}/consumerGroups', + 'placeholders' => [ + 'parent' => [ + 'getters' => [ + 'getParent', + ], + ], + ], + ], + 'ListTopics' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{parent=projects/*/locations/*/clusters/*}/topics', + 'placeholders' => [ + 'parent' => [ + 'getters' => [ + 'getParent', + ], + ], + ], + ], + 'UpdateCluster' => [ + 'method' => 'patch', + 'uriTemplate' => '/v1/{cluster.name=projects/*/locations/*/clusters/*}', + 'body' => 'cluster', + 'placeholders' => [ + 'cluster.name' => [ + 'getters' => [ + 'getCluster', + 'getName', + ], + ], + ], + 'queryParams' => [ + 'update_mask', + ], + ], + 'UpdateConsumerGroup' => [ + 'method' => 'patch', + 'uriTemplate' => '/v1/{consumer_group.name=projects/*/locations/*/clusters/*/consumerGroups/*}', + 'body' => 'consumer_group', + 'placeholders' => [ + 'consumer_group.name' => [ + 'getters' => [ + 'getConsumerGroup', + 'getName', + ], + ], + ], + 'queryParams' => [ + 'update_mask', + ], + ], + 'UpdateTopic' => [ + 'method' => 'patch', + 'uriTemplate' => '/v1/{topic.name=projects/*/locations/*/clusters/*/topics/*}', + 'body' => 'topic', + 'placeholders' => [ + 'topic.name' => [ + 'getters' => [ + 'getTopic', + 'getName', + ], + ], + ], + 'queryParams' => [ + 'update_mask', + ], + ], + ], + 'google.longrunning.Operations' => [ + 'CancelOperation' => [ + 'method' => 'post', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/operations/*}:cancel', + 'body' => '*', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'DeleteOperation' => [ + 'method' => 'delete', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/operations/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'GetOperation' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*/operations/*}', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + 'ListOperations' => [ + 'method' => 'get', + 'uriTemplate' => '/v1/{name=projects/*/locations/*}/operations', + 'placeholders' => [ + 'name' => [ + 'getters' => [ + 'getName', + ], + ], + ], + ], + ], + ], + 'numericEnums' => true, +]; diff --git a/ManagedKafka/tests/Unit/V1/Client/ManagedKafkaClientTest.php 
b/ManagedKafka/tests/Unit/V1/Client/ManagedKafkaClientTest.php new file mode 100644 index 000000000000..0cdbbd8744e2 --- /dev/null +++ b/ManagedKafka/tests/Unit/V1/Client/ManagedKafkaClientTest.php @@ -0,0 +1,1522 @@ +getMockBuilder(CredentialsWrapper::class) + ->disableOriginalConstructor() + ->getMock(); + } + + /** @return ManagedKafkaClient */ + private function createClient(array $options = []) + { + $options += [ + 'credentials' => $this->createCredentials(), + ]; + return new ManagedKafkaClient($options); + } + + /** @test */ + public function createClusterTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/createClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $name = 'name3373707'; + $expectedResponse = new Cluster(); + $expectedResponse->setName($name); + $anyResponse = new Any(); + $anyResponse->setValue($expectedResponse->serializeToString()); + $completeOperation = new Operation(); + $completeOperation->setName('operations/createClusterTest'); + $completeOperation->setDone(true); + $completeOperation->setResponse($anyResponse); + $operationsTransport->addResponse($completeOperation); + // Mock request + $formattedParent = $gapicClient->locationName('[PROJECT]', '[LOCATION]'); + $clusterId = 'clusterId240280960'; + $cluster = new Cluster(); + $clusterCapacityConfig = new CapacityConfig(); + $capacityConfigVcpuCount = 1944563327; + $clusterCapacityConfig->setVcpuCount($capacityConfigVcpuCount); + $capacityConfigMemoryBytes = 743041454; + $clusterCapacityConfig->setMemoryBytes($capacityConfigMemoryBytes); + $cluster->setCapacityConfig($clusterCapacityConfig); + $clusterGcpConfig = new GcpConfig(); + $gcpConfigAccessConfig = new AccessConfig(); + $accessConfigNetworkConfigs = []; + $gcpConfigAccessConfig->setNetworkConfigs($accessConfigNetworkConfigs); + $clusterGcpConfig->setAccessConfig($gcpConfigAccessConfig); + $cluster->setGcpConfig($clusterGcpConfig); + $request = (new CreateClusterRequest()) + ->setParent($formattedParent) + ->setClusterId($clusterId) + ->setCluster($cluster); + $response = $gapicClient->createCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $apiRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($apiRequests)); + $operationsRequestsEmpty = $operationsTransport->popReceivedCalls(); + $this->assertSame(0, count($operationsRequestsEmpty)); + $actualApiFuncCall = $apiRequests[0]->getFuncCall(); + $actualApiRequestObject = $apiRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/CreateCluster', $actualApiFuncCall); + $actualValue = $actualApiRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $actualValue = $actualApiRequestObject->getClusterId(); + $this->assertProtobufEquals($clusterId, $actualValue); + $actualValue = $actualApiRequestObject->getCluster(); + $this->assertProtobufEquals($cluster, $actualValue); + 
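+        // Poll the mocked operation to completion and verify the resulting GetOperation call below.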
$expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/createClusterTest'); + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + $this->assertTrue($response->isDone()); + $this->assertEquals($expectedResponse, $response->getResult()); + $apiRequestsEmpty = $transport->popReceivedCalls(); + $this->assertSame(0, count($apiRequestsEmpty)); + $operationsRequests = $operationsTransport->popReceivedCalls(); + $this->assertSame(1, count($operationsRequests)); + $actualOperationsFuncCall = $operationsRequests[0]->getFuncCall(); + $actualOperationsRequestObject = $operationsRequests[0]->getRequestObject(); + $this->assertSame('/google.longrunning.Operations/GetOperation', $actualOperationsFuncCall); + $this->assertEquals($expectedOperationsRequestObject, $actualOperationsRequestObject); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function createClusterExceptionTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/createClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $operationsTransport->addResponse(null, $status); + // Mock request + $formattedParent = $gapicClient->locationName('[PROJECT]', '[LOCATION]'); + $clusterId = 'clusterId240280960'; + $cluster = new Cluster(); + $clusterCapacityConfig = new CapacityConfig(); + $capacityConfigVcpuCount = 1944563327; + $clusterCapacityConfig->setVcpuCount($capacityConfigVcpuCount); + $capacityConfigMemoryBytes = 743041454; + $clusterCapacityConfig->setMemoryBytes($capacityConfigMemoryBytes); + $cluster->setCapacityConfig($clusterCapacityConfig); + $clusterGcpConfig = new GcpConfig(); + $gcpConfigAccessConfig = new AccessConfig(); + $accessConfigNetworkConfigs = []; + $gcpConfigAccessConfig->setNetworkConfigs($accessConfigNetworkConfigs); + $clusterGcpConfig->setAccessConfig($gcpConfigAccessConfig); + $cluster->setGcpConfig($clusterGcpConfig); + $request = (new CreateClusterRequest()) + ->setParent($formattedParent) + ->setClusterId($clusterId) + ->setCluster($cluster); + $response = $gapicClient->createCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/createClusterTest'); + try { + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + // If the pollUntilComplete() method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + 
$this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stubs are exhausted + $transport->popReceivedCalls(); + $operationsTransport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function createTopicTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name = 'name3373707'; + $partitionCount = 1738969222; + $replicationFactor = 1434332894; + $expectedResponse = new Topic(); + $expectedResponse->setName($name); + $expectedResponse->setPartitionCount($partitionCount); + $expectedResponse->setReplicationFactor($replicationFactor); + $transport->addResponse($expectedResponse); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $topicId = 'topicId-957291989'; + $topic = new Topic(); + $topicPartitionCount = 2129663148; + $topic->setPartitionCount($topicPartitionCount); + $topicReplicationFactor = 1954252084; + $topic->setReplicationFactor($topicReplicationFactor); + $request = (new CreateTopicRequest()) + ->setParent($formattedParent) + ->setTopicId($topicId) + ->setTopic($topic); + $response = $gapicClient->createTopic($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/CreateTopic', $actualFuncCall); + $actualValue = $actualRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $actualValue = $actualRequestObject->getTopicId(); + $this->assertProtobufEquals($topicId, $actualValue); + $actualValue = $actualRequestObject->getTopic(); + $this->assertProtobufEquals($topic, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function createTopicExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $topicId = 'topicId-957291989'; + $topic = new Topic(); + $topicPartitionCount = 2129663148; + $topic->setPartitionCount($topicPartitionCount); + $topicReplicationFactor = 1954252084; + $topic->setReplicationFactor($topicReplicationFactor); + $request = (new CreateTopicRequest()) + ->setParent($formattedParent) + ->setTopicId($topicId) + ->setTopic($topic); + try { + $gapicClient->createTopic($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + 
$this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function deleteClusterTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/deleteClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $expectedResponse = new GPBEmpty(); + $anyResponse = new Any(); + $anyResponse->setValue($expectedResponse->serializeToString()); + $completeOperation = new Operation(); + $completeOperation->setName('operations/deleteClusterTest'); + $completeOperation->setDone(true); + $completeOperation->setResponse($anyResponse); + $operationsTransport->addResponse($completeOperation); + // Mock request + $formattedName = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new DeleteClusterRequest())->setName($formattedName); + $response = $gapicClient->deleteCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $apiRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($apiRequests)); + $operationsRequestsEmpty = $operationsTransport->popReceivedCalls(); + $this->assertSame(0, count($operationsRequestsEmpty)); + $actualApiFuncCall = $apiRequests[0]->getFuncCall(); + $actualApiRequestObject = $apiRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/DeleteCluster', $actualApiFuncCall); + $actualValue = $actualApiRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/deleteClusterTest'); + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + $this->assertTrue($response->isDone()); + $this->assertEquals($expectedResponse, $response->getResult()); + $apiRequestsEmpty = $transport->popReceivedCalls(); + $this->assertSame(0, count($apiRequestsEmpty)); + $operationsRequests = $operationsTransport->popReceivedCalls(); + $this->assertSame(1, count($operationsRequests)); + $actualOperationsFuncCall = $operationsRequests[0]->getFuncCall(); + $actualOperationsRequestObject = $operationsRequests[0]->getRequestObject(); + $this->assertSame('/google.longrunning.Operations/GetOperation', $actualOperationsFuncCall); + $this->assertEquals($expectedOperationsRequestObject, $actualOperationsRequestObject); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function deleteClusterExceptionTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = 
$this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/deleteClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $operationsTransport->addResponse(null, $status); + // Mock request + $formattedName = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new DeleteClusterRequest())->setName($formattedName); + $response = $gapicClient->deleteCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/deleteClusterTest'); + try { + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + // If the pollUntilComplete() method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stubs are exhausted + $transport->popReceivedCalls(); + $operationsTransport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function deleteConsumerGroupTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $expectedResponse = new GPBEmpty(); + $transport->addResponse($expectedResponse); + // Mock request + $formattedName = $gapicClient->consumerGroupName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[CONSUMER_GROUP]'); + $request = (new DeleteConsumerGroupRequest())->setName($formattedName); + $gapicClient->deleteConsumerGroup($request); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/DeleteConsumerGroup', $actualFuncCall); + $actualValue = $actualRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function deleteConsumerGroupExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedName = 
$gapicClient->consumerGroupName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[CONSUMER_GROUP]'); + $request = (new DeleteConsumerGroupRequest())->setName($formattedName); + try { + $gapicClient->deleteConsumerGroup($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function deleteTopicTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $expectedResponse = new GPBEmpty(); + $transport->addResponse($expectedResponse); + // Mock request + $formattedName = $gapicClient->topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + $request = (new DeleteTopicRequest())->setName($formattedName); + $gapicClient->deleteTopic($request); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/DeleteTopic', $actualFuncCall); + $actualValue = $actualRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function deleteTopicExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedName = $gapicClient->topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + $request = (new DeleteTopicRequest())->setName($formattedName); + try { + $gapicClient->deleteTopic($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getClusterTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name2 = 'name2-1052831874'; + $expectedResponse = new Cluster(); + $expectedResponse->setName($name2); + $transport->addResponse($expectedResponse); + // Mock request + $formattedName = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new GetClusterRequest())->setName($formattedName); + $response = $gapicClient->getCluster($request); + 
$this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/GetCluster', $actualFuncCall); + $actualValue = $actualRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getClusterExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedName = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new GetClusterRequest())->setName($formattedName); + try { + $gapicClient->getCluster($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getConsumerGroupTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name2 = 'name2-1052831874'; + $expectedResponse = new ConsumerGroup(); + $expectedResponse->setName($name2); + $transport->addResponse($expectedResponse); + // Mock request + $formattedName = $gapicClient->consumerGroupName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[CONSUMER_GROUP]'); + $request = (new GetConsumerGroupRequest())->setName($formattedName); + $response = $gapicClient->getConsumerGroup($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/GetConsumerGroup', $actualFuncCall); + $actualValue = $actualRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getConsumerGroupExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedName = 
$gapicClient->consumerGroupName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[CONSUMER_GROUP]'); + $request = (new GetConsumerGroupRequest())->setName($formattedName); + try { + $gapicClient->getConsumerGroup($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getTopicTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name2 = 'name2-1052831874'; + $partitionCount = 1738969222; + $replicationFactor = 1434332894; + $expectedResponse = new Topic(); + $expectedResponse->setName($name2); + $expectedResponse->setPartitionCount($partitionCount); + $expectedResponse->setReplicationFactor($replicationFactor); + $transport->addResponse($expectedResponse); + // Mock request + $formattedName = $gapicClient->topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + $request = (new GetTopicRequest())->setName($formattedName); + $response = $gapicClient->getTopic($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/GetTopic', $actualFuncCall); + $actualValue = $actualRequestObject->getName(); + $this->assertProtobufEquals($formattedName, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getTopicExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedName = $gapicClient->topicName('[PROJECT]', '[LOCATION]', '[CLUSTER]', '[TOPIC]'); + $request = (new GetTopicRequest())->setName($formattedName); + try { + $gapicClient->getTopic($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listClustersTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $nextPageToken = ''; + $clustersElement = new Cluster(); + $clusters = [$clustersElement]; + $expectedResponse 
= new ListClustersResponse(); + $expectedResponse->setNextPageToken($nextPageToken); + $expectedResponse->setClusters($clusters); + $transport->addResponse($expectedResponse); + // Mock request + $formattedParent = $gapicClient->locationName('[PROJECT]', '[LOCATION]'); + $request = (new ListClustersRequest())->setParent($formattedParent); + $response = $gapicClient->listClusters($request); + $this->assertEquals($expectedResponse, $response->getPage()->getResponseObject()); + $resources = iterator_to_array($response->iterateAllElements()); + $this->assertSame(1, count($resources)); + $this->assertEquals($expectedResponse->getClusters()[0], $resources[0]); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/ListClusters', $actualFuncCall); + $actualValue = $actualRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listClustersExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedParent = $gapicClient->locationName('[PROJECT]', '[LOCATION]'); + $request = (new ListClustersRequest())->setParent($formattedParent); + try { + $gapicClient->listClusters($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listConsumerGroupsTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $nextPageToken = ''; + $consumerGroupsElement = new ConsumerGroup(); + $consumerGroups = [$consumerGroupsElement]; + $expectedResponse = new ListConsumerGroupsResponse(); + $expectedResponse->setNextPageToken($nextPageToken); + $expectedResponse->setConsumerGroups($consumerGroups); + $transport->addResponse($expectedResponse); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new ListConsumerGroupsRequest())->setParent($formattedParent); + $response = $gapicClient->listConsumerGroups($request); + $this->assertEquals($expectedResponse, $response->getPage()->getResponseObject()); + $resources = iterator_to_array($response->iterateAllElements()); + $this->assertSame(1, count($resources)); + $this->assertEquals($expectedResponse->getConsumerGroups()[0], $resources[0]); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); 
+ $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/ListConsumerGroups', $actualFuncCall); + $actualValue = $actualRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listConsumerGroupsExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new ListConsumerGroupsRequest())->setParent($formattedParent); + try { + $gapicClient->listConsumerGroups($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listTopicsTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $nextPageToken = ''; + $topicsElement = new Topic(); + $topics = [$topicsElement]; + $expectedResponse = new ListTopicsResponse(); + $expectedResponse->setNextPageToken($nextPageToken); + $expectedResponse->setTopics($topics); + $transport->addResponse($expectedResponse); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new ListTopicsRequest())->setParent($formattedParent); + $response = $gapicClient->listTopics($request); + $this->assertEquals($expectedResponse, $response->getPage()->getResponseObject()); + $resources = iterator_to_array($response->iterateAllElements()); + $this->assertSame(1, count($resources)); + $this->assertEquals($expectedResponse->getTopics()[0], $resources[0]); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/ListTopics', $actualFuncCall); + $actualValue = $actualRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listTopicsExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => 
Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $formattedParent = $gapicClient->clusterName('[PROJECT]', '[LOCATION]', '[CLUSTER]'); + $request = (new ListTopicsRequest())->setParent($formattedParent); + try { + $gapicClient->listTopics($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function updateClusterTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/updateClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $name = 'name3373707'; + $expectedResponse = new Cluster(); + $expectedResponse->setName($name); + $anyResponse = new Any(); + $anyResponse->setValue($expectedResponse->serializeToString()); + $completeOperation = new Operation(); + $completeOperation->setName('operations/updateClusterTest'); + $completeOperation->setDone(true); + $completeOperation->setResponse($anyResponse); + $operationsTransport->addResponse($completeOperation); + // Mock request + $updateMask = new FieldMask(); + $cluster = new Cluster(); + $clusterCapacityConfig = new CapacityConfig(); + $capacityConfigVcpuCount = 1944563327; + $clusterCapacityConfig->setVcpuCount($capacityConfigVcpuCount); + $capacityConfigMemoryBytes = 743041454; + $clusterCapacityConfig->setMemoryBytes($capacityConfigMemoryBytes); + $cluster->setCapacityConfig($clusterCapacityConfig); + $clusterGcpConfig = new GcpConfig(); + $gcpConfigAccessConfig = new AccessConfig(); + $accessConfigNetworkConfigs = []; + $gcpConfigAccessConfig->setNetworkConfigs($accessConfigNetworkConfigs); + $clusterGcpConfig->setAccessConfig($gcpConfigAccessConfig); + $cluster->setGcpConfig($clusterGcpConfig); + $request = (new UpdateClusterRequest())->setUpdateMask($updateMask)->setCluster($cluster); + $response = $gapicClient->updateCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $apiRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($apiRequests)); + $operationsRequestsEmpty = $operationsTransport->popReceivedCalls(); + $this->assertSame(0, count($operationsRequestsEmpty)); + $actualApiFuncCall = $apiRequests[0]->getFuncCall(); + $actualApiRequestObject = $apiRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/UpdateCluster', $actualApiFuncCall); + $actualValue = $actualApiRequestObject->getUpdateMask(); + $this->assertProtobufEquals($updateMask, $actualValue); + $actualValue = $actualApiRequestObject->getCluster(); + 
$this->assertProtobufEquals($cluster, $actualValue); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/updateClusterTest'); + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + $this->assertTrue($response->isDone()); + $this->assertEquals($expectedResponse, $response->getResult()); + $apiRequestsEmpty = $transport->popReceivedCalls(); + $this->assertSame(0, count($apiRequestsEmpty)); + $operationsRequests = $operationsTransport->popReceivedCalls(); + $this->assertSame(1, count($operationsRequests)); + $actualOperationsFuncCall = $operationsRequests[0]->getFuncCall(); + $actualOperationsRequestObject = $operationsRequests[0]->getRequestObject(); + $this->assertSame('/google.longrunning.Operations/GetOperation', $actualOperationsFuncCall); + $this->assertEquals($expectedOperationsRequestObject, $actualOperationsRequestObject); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function updateClusterExceptionTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' => $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/updateClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $operationsTransport->addResponse(null, $status); + // Mock request + $updateMask = new FieldMask(); + $cluster = new Cluster(); + $clusterCapacityConfig = new CapacityConfig(); + $capacityConfigVcpuCount = 1944563327; + $clusterCapacityConfig->setVcpuCount($capacityConfigVcpuCount); + $capacityConfigMemoryBytes = 743041454; + $clusterCapacityConfig->setMemoryBytes($capacityConfigMemoryBytes); + $cluster->setCapacityConfig($clusterCapacityConfig); + $clusterGcpConfig = new GcpConfig(); + $gcpConfigAccessConfig = new AccessConfig(); + $accessConfigNetworkConfigs = []; + $gcpConfigAccessConfig->setNetworkConfigs($accessConfigNetworkConfigs); + $clusterGcpConfig->setAccessConfig($gcpConfigAccessConfig); + $cluster->setGcpConfig($clusterGcpConfig); + $request = (new UpdateClusterRequest())->setUpdateMask($updateMask)->setCluster($cluster); + $response = $gapicClient->updateCluster($request); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/updateClusterTest'); + try { + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + // If the pollUntilComplete() method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + 
$this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stubs are exhausted + $transport->popReceivedCalls(); + $operationsTransport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + } + + /** @test */ + public function updateConsumerGroupTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name = 'name3373707'; + $expectedResponse = new ConsumerGroup(); + $expectedResponse->setName($name); + $transport->addResponse($expectedResponse); + // Mock request + $updateMask = new FieldMask(); + $consumerGroup = new ConsumerGroup(); + $request = (new UpdateConsumerGroupRequest())->setUpdateMask($updateMask)->setConsumerGroup($consumerGroup); + $response = $gapicClient->updateConsumerGroup($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/UpdateConsumerGroup', $actualFuncCall); + $actualValue = $actualRequestObject->getUpdateMask(); + $this->assertProtobufEquals($updateMask, $actualValue); + $actualValue = $actualRequestObject->getConsumerGroup(); + $this->assertProtobufEquals($consumerGroup, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function updateConsumerGroupExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $updateMask = new FieldMask(); + $consumerGroup = new ConsumerGroup(); + $request = (new UpdateConsumerGroupRequest())->setUpdateMask($updateMask)->setConsumerGroup($consumerGroup); + try { + $gapicClient->updateConsumerGroup($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function updateTopicTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name = 'name3373707'; + $partitionCount = 1738969222; + $replicationFactor = 1434332894; + $expectedResponse = new Topic(); + $expectedResponse->setName($name); + $expectedResponse->setPartitionCount($partitionCount); + $expectedResponse->setReplicationFactor($replicationFactor); + $transport->addResponse($expectedResponse); + // Mock request + $updateMask = new FieldMask(); + $topic = new Topic(); + 
$topicPartitionCount = 2129663148; + $topic->setPartitionCount($topicPartitionCount); + $topicReplicationFactor = 1954252084; + $topic->setReplicationFactor($topicReplicationFactor); + $request = (new UpdateTopicRequest())->setUpdateMask($updateMask)->setTopic($topic); + $response = $gapicClient->updateTopic($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/UpdateTopic', $actualFuncCall); + $actualValue = $actualRequestObject->getUpdateMask(); + $this->assertProtobufEquals($updateMask, $actualValue); + $actualValue = $actualRequestObject->getTopic(); + $this->assertProtobufEquals($topic, $actualValue); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function updateTopicExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + // Mock request + $updateMask = new FieldMask(); + $topic = new Topic(); + $topicPartitionCount = 2129663148; + $topic->setPartitionCount($topicPartitionCount); + $topicReplicationFactor = 1954252084; + $topic->setReplicationFactor($topicReplicationFactor); + $request = (new UpdateTopicRequest())->setUpdateMask($updateMask)->setTopic($topic); + try { + $gapicClient->updateTopic($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getLocationTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $name2 = 'name2-1052831874'; + $locationId = 'locationId552319461'; + $displayName = 'displayName1615086568'; + $expectedResponse = new Location(); + $expectedResponse->setName($name2); + $expectedResponse->setLocationId($locationId); + $expectedResponse->setDisplayName($displayName); + $transport->addResponse($expectedResponse); + $request = new GetLocationRequest(); + $response = $gapicClient->getLocation($request); + $this->assertEquals($expectedResponse, $response); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.location.Locations/GetLocation', $actualFuncCall); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function getLocationExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = 
$this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + $request = new GetLocationRequest(); + try { + $gapicClient->getLocation($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listLocationsTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + // Mock response + $nextPageToken = ''; + $locationsElement = new Location(); + $locations = [$locationsElement]; + $expectedResponse = new ListLocationsResponse(); + $expectedResponse->setNextPageToken($nextPageToken); + $expectedResponse->setLocations($locations); + $transport->addResponse($expectedResponse); + $request = new ListLocationsRequest(); + $response = $gapicClient->listLocations($request); + $this->assertEquals($expectedResponse, $response->getPage()->getResponseObject()); + $resources = iterator_to_array($response->iterateAllElements()); + $this->assertSame(1, count($resources)); + $this->assertEquals($expectedResponse->getLocations()[0], $resources[0]); + $actualRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($actualRequests)); + $actualFuncCall = $actualRequests[0]->getFuncCall(); + $actualRequestObject = $actualRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.location.Locations/ListLocations', $actualFuncCall); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function listLocationsExceptionTest() + { + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + ]); + $this->assertTrue($transport->isExhausted()); + $status = new stdClass(); + $status->code = Code::DATA_LOSS; + $status->details = 'internal error'; + $expectedExceptionMessage = json_encode( + [ + 'message' => 'internal error', + 'code' => Code::DATA_LOSS, + 'status' => 'DATA_LOSS', + 'details' => [], + ], + JSON_PRETTY_PRINT + ); + $transport->addResponse(null, $status); + $request = new ListLocationsRequest(); + try { + $gapicClient->listLocations($request); + // If the $gapicClient method call did not throw, fail the test + $this->fail('Expected an ApiException, but no exception was thrown.'); + } catch (ApiException $ex) { + $this->assertEquals($status->code, $ex->getCode()); + $this->assertEquals($expectedExceptionMessage, $ex->getMessage()); + } + // Call popReceivedCalls to ensure the stub is exhausted + $transport->popReceivedCalls(); + $this->assertTrue($transport->isExhausted()); + } + + /** @test */ + public function createClusterAsyncTest() + { + $operationsTransport = $this->createTransport(); + $operationsClient = new OperationsClient([ + 'apiEndpoint' => '', + 'transport' => $operationsTransport, + 'credentials' 
=> $this->createCredentials(), + ]); + $transport = $this->createTransport(); + $gapicClient = $this->createClient([ + 'transport' => $transport, + 'operationsClient' => $operationsClient, + ]); + $this->assertTrue($transport->isExhausted()); + $this->assertTrue($operationsTransport->isExhausted()); + // Mock response + $incompleteOperation = new Operation(); + $incompleteOperation->setName('operations/createClusterTest'); + $incompleteOperation->setDone(false); + $transport->addResponse($incompleteOperation); + $name = 'name3373707'; + $expectedResponse = new Cluster(); + $expectedResponse->setName($name); + $anyResponse = new Any(); + $anyResponse->setValue($expectedResponse->serializeToString()); + $completeOperation = new Operation(); + $completeOperation->setName('operations/createClusterTest'); + $completeOperation->setDone(true); + $completeOperation->setResponse($anyResponse); + $operationsTransport->addResponse($completeOperation); + // Mock request + $formattedParent = $gapicClient->locationName('[PROJECT]', '[LOCATION]'); + $clusterId = 'clusterId240280960'; + $cluster = new Cluster(); + $clusterCapacityConfig = new CapacityConfig(); + $capacityConfigVcpuCount = 1944563327; + $clusterCapacityConfig->setVcpuCount($capacityConfigVcpuCount); + $capacityConfigMemoryBytes = 743041454; + $clusterCapacityConfig->setMemoryBytes($capacityConfigMemoryBytes); + $cluster->setCapacityConfig($clusterCapacityConfig); + $clusterGcpConfig = new GcpConfig(); + $gcpConfigAccessConfig = new AccessConfig(); + $accessConfigNetworkConfigs = []; + $gcpConfigAccessConfig->setNetworkConfigs($accessConfigNetworkConfigs); + $clusterGcpConfig->setAccessConfig($gcpConfigAccessConfig); + $cluster->setGcpConfig($clusterGcpConfig); + $request = (new CreateClusterRequest()) + ->setParent($formattedParent) + ->setClusterId($clusterId) + ->setCluster($cluster); + $response = $gapicClient->createClusterAsync($request)->wait(); + $this->assertFalse($response->isDone()); + $this->assertNull($response->getResult()); + $apiRequests = $transport->popReceivedCalls(); + $this->assertSame(1, count($apiRequests)); + $operationsRequestsEmpty = $operationsTransport->popReceivedCalls(); + $this->assertSame(0, count($operationsRequestsEmpty)); + $actualApiFuncCall = $apiRequests[0]->getFuncCall(); + $actualApiRequestObject = $apiRequests[0]->getRequestObject(); + $this->assertSame('/google.cloud.managedkafka.v1.ManagedKafka/CreateCluster', $actualApiFuncCall); + $actualValue = $actualApiRequestObject->getParent(); + $this->assertProtobufEquals($formattedParent, $actualValue); + $actualValue = $actualApiRequestObject->getClusterId(); + $this->assertProtobufEquals($clusterId, $actualValue); + $actualValue = $actualApiRequestObject->getCluster(); + $this->assertProtobufEquals($cluster, $actualValue); + $expectedOperationsRequestObject = new GetOperationRequest(); + $expectedOperationsRequestObject->setName('operations/createClusterTest'); + $response->pollUntilComplete([ + 'initialPollDelayMillis' => 1, + ]); + $this->assertTrue($response->isDone()); + $this->assertEquals($expectedResponse, $response->getResult()); + $apiRequestsEmpty = $transport->popReceivedCalls(); + $this->assertSame(0, count($apiRequestsEmpty)); + $operationsRequests = $operationsTransport->popReceivedCalls(); + $this->assertSame(1, count($operationsRequests)); + $actualOperationsFuncCall = $operationsRequests[0]->getFuncCall(); + $actualOperationsRequestObject = $operationsRequests[0]->getRequestObject(); + 
$this->assertSame('/google.longrunning.Operations/GetOperation', $actualOperationsFuncCall);
+        $this->assertEquals($expectedOperationsRequestObject, $actualOperationsRequestObject);
+        $this->assertTrue($transport->isExhausted());
+        $this->assertTrue($operationsTransport->isExhausted());
+    }
+}
diff --git a/composer.json b/composer.json
index 7a1176c4d5b0..44eae7d2bb96
--- a/composer.json
+++ b/composer.json
@@ -172,6 +172,7 @@
         "google/cloud-life-sciences": "0.6.5",
         "google/cloud-logging": "1.30.2",
         "google/cloud-managed-identities": "1.3.5",
+        "google/cloud-managedkafka": "0.0.0",
         "google/cloud-media-translation": "0.4.4",
         "google/cloud-memcache": "1.3.5",
         "google/cloud-migrationcenter": "0.4.5",
@@ -359,6 +360,7 @@
             "GPBMetadata\\Google\\Cloud\\Language\\": "Language/metadata",
             "GPBMetadata\\Google\\Cloud\\Lifesciences\\": "LifeSciences/metadata",
             "GPBMetadata\\Google\\Cloud\\Managedidentities\\": "ManagedIdentities/metadata",
+            "GPBMetadata\\Google\\Cloud\\Managedkafka\\": "ManagedKafka/metadata",
             "GPBMetadata\\Google\\Cloud\\Mediatranslation\\": "MediaTranslation/metadata",
             "GPBMetadata\\Google\\Cloud\\Memcache\\": "Memcache/metadata",
             "GPBMetadata\\Google\\Cloud\\Metastore\\": "DataprocMetastore/metadata",
@@ -572,6 +574,7 @@
             "Google\\Cloud\\LifeSciences\\": "LifeSciences/src",
             "Google\\Cloud\\Logging\\": "Logging/src",
             "Google\\Cloud\\ManagedIdentities\\": "ManagedIdentities/src",
+            "Google\\Cloud\\ManagedKafka\\": "ManagedKafka/src",
             "Google\\Cloud\\MediaTranslation\\": "MediaTranslation/src",
             "Google\\Cloud\\Memcache\\": "Memcache/src",
             "Google\\Cloud\\Metastore\\": "DataprocMetastore/src",
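
Not part of the patch itself: a minimal usage sketch of the client surface these tests exercise, mirroring the listTopics flow covered above. It assumes Composer's vendor/autoload.php, Application Default Credentials, and placeholder project/location/cluster IDs; treat it as illustrative only.

<?php
// Illustrative sketch only (not generated code); assumes ADC and placeholder resource IDs.
require 'vendor/autoload.php';

use Google\Cloud\ManagedKafka\V1\Client\ManagedKafkaClient;
use Google\Cloud\ManagedKafka\V1\ListTopicsRequest;

$client = new ManagedKafkaClient();

// clusterName() formats projects/{project}/locations/{location}/clusters/{cluster},
// the same helper the tests above use to build parent resource names.
$parent = $client->clusterName('my-project', 'us-central1', 'my-cluster');

$request = (new ListTopicsRequest())->setParent($parent);

// listTopics() returns a paged response; iterateAllElements() walks every page.
foreach ($client->listTopics($request)->iterateAllElements() as $topic) {
    printf("%s (partitions: %d)\n", $topic->getName(), $topic->getPartitionCount());
}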