From bd52f5bbb35181cd7f6a3e5ffdab8a784f6b9aa8 Mon Sep 17 00:00:00 2001 From: Schahin Rouhanizadeh Date: Wed, 28 Aug 2024 16:45:15 +0200 Subject: [PATCH] Add `nativelink-bridge` This is a quick prototype that subscribes to the redis/dragonflydb "build_events" channel, decodes the events via protobuf, and sends them to the browser over WebSocket. It is a plain prototype without any error handling or formatting on the browser side. --- .../examples/basic_cas_test_conf.json | 178 ++ .../build/v1/build_event_stream.proto | 1440 +++++++++++++++++ .../build/v1/google/api/annotations.proto | 31 + .../devtools/build/v1/google/api/client.proto | 99 ++ .../build/v1/google/api/field_behavior.proto | 104 ++ .../build/v1/google/api/google/api/http.proto | 388 +++++ .../api/google/protobuf/descriptor.proto | 909 +++++++++++ .../devtools/build/v1/google/api/http.proto | 388 +++++ .../v1/src/main/protobuf/action_cache.proto | 63 + .../v1/src/main/protobuf/command_line.proto | 102 ++ .../src/main/protobuf/failure_details.proto | 1364 ++++++++++++++++ .../protobuf/google/protobuf/descriptor.proto | 909 +++++++++++ .../src/main/protobuf/invocation_policy.proto | 207 +++ .../src/main/protobuf/option_filters.proto | 61 + .../src/main/protobuf/strategy_policy.proto | 67 + tools/pre-commit-hooks.nix | 1 + web/app/src/pages/app/index.astro | 50 + web/bridge/.gitignore | 175 ++ web/bridge/README.md | 74 + web/bridge/bun.lockb | Bin 0 -> 11334 bytes web/bridge/index.ts | 33 + web/bridge/package.json | 15 + web/bridge/src/eventHandler.ts | 111 ++ web/bridge/src/protobuf.ts | 19 + web/bridge/src/redis.ts | 23 + web/bridge/src/utils.ts | 20 + web/bridge/src/websocket.ts | 44 + web/bridge/tsconfig.json | 27 + 28 files changed, 6902 insertions(+) create mode 100644 nativelink-config/examples/basic_cas_test_conf.json create mode 100644 nativelink-proto/google/devtools/build/v1/build_event_stream.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/annotations.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/client.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/field_behavior.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/google/api/http.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/google/protobuf/descriptor.proto create mode 100644 nativelink-proto/google/devtools/build/v1/google/api/http.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/action_cache.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/command_line.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/failure_details.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/google/protobuf/descriptor.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/invocation_policy.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/option_filters.proto create mode 100644 nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/strategy_policy.proto create mode 100644 web/app/src/pages/app/index.astro create mode 100644 web/bridge/.gitignore create mode 100644 web/bridge/README.md create mode 100755 web/bridge/bun.lockb create mode 100644 web/bridge/index.ts create mode 100644 web/bridge/package.json create mode 100644 web/bridge/src/eventHandler.ts create mode 100644 web/bridge/src/protobuf.ts create
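For orientation, the commit message describes a subscribe → decode → forward pipeline. The sketch below is not the code added under `web/bridge/` in this patch; it is a minimal illustration of that flow under assumed dependencies (`ioredis` for the Redis subscription, `protobufjs` for decoding, Bun's built-in WebSocket pub/sub for fan-out). The channel name follows `experimental_pub_sub_channel` in the example config, the port and topic names are arbitrary, and the payload is assumed to be a serialized `build_event_stream.BuildEvent`.

```ts
// Hypothetical sketch of the bridge flow; the real implementation lives in
// web/bridge/src/{redis,protobuf,websocket,eventHandler}.ts of this patch.
import Redis from "ioredis";
import protobuf from "protobufjs";

const CHANNEL = "build_event"; // assumed: matches experimental_pub_sub_channel
const PROTO =
  "nativelink-proto/google/devtools/build/v1/build_event_stream.proto";

// Load the vendored schema; its imports (src/main/protobuf/*.proto) must be
// resolvable from the working directory for protobufjs to parse it.
const root = await protobuf.load(PROTO);
const BuildEvent = root.lookupType("build_event_stream.BuildEvent");

// WebSocket fan-out to browsers via Bun's topic-based pub/sub.
const server = Bun.serve({
  port: 8080,
  fetch(req, srv) {
    if (srv.upgrade(req)) return; // upgrade browser connections to WebSocket
    return new Response("nativelink-bridge");
  },
  websocket: {
    open(ws) {
      ws.subscribe("bep"); // every connected client receives broadcast events
    },
    message() {}, // browser -> bridge traffic is ignored in this prototype
  },
});

// Dedicated subscriber connection to redis/dragonflydb.
const sub = new Redis("redis://localhost:6379/0");
await sub.subscribe(CHANNEL);
sub.on("messageBuffer", (_channel, payload) => {
  try {
    // Assumption: the channel carries serialized build_event_stream.BuildEvent;
    // adjust the lookup type if a google.devtools.build.v1 wrapper is published.
    const event = BuildEvent.decode(payload);
    server.publish("bep", JSON.stringify(BuildEvent.toObject(event)));
  } catch (err) {
    console.error("failed to decode build event", err); // prototype-level handling
  }
});

console.log(`bridging ${CHANNEL} -> ws://localhost:${server.port}`);
```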
mode 100644 web/bridge/src/redis.ts create mode 100644 web/bridge/src/utils.ts create mode 100644 web/bridge/src/websocket.ts create mode 100644 web/bridge/tsconfig.json diff --git a/nativelink-config/examples/basic_cas_test_conf.json b/nativelink-config/examples/basic_cas_test_conf.json new file mode 100644 index 0000000000..78cb83cce8 --- /dev/null +++ b/nativelink-config/examples/basic_cas_test_conf.json @@ -0,0 +1,178 @@ +{ + "stores": { + "AC_MAIN_STORE": { + "filesystem": { + "content_path": "/tmp/nativelink/data-worker-test/content_path-ac", + "temp_path": "/tmp/nativelink/data-worker-test/tmp_path-ac", + "eviction_policy": { + "max_bytes": 100000000000 + } + } + }, + "BEP_STORE": { + + "redis_store": { + "addresses": [ + "redis://@localhost:6379/0" + ], + "response_timeout_s": 42, + "connection_timeout_s": 42, + "experimental_pub_sub_channel": "build_event", + "key_prefix": "nativelink.", + "mode": "standard" + } + }, + "WORKER_FAST_SLOW_STORE": { + "fast_slow": { + "fast": { + "filesystem": { + "content_path": "/tmp/nativelink/data-worker-test/content_path-cas", + "temp_path": "/tmp/nativelink/data-worker-test/tmp_path-cas", + "eviction_policy": { + "max_bytes": 100000000000 + } + } + }, + "slow": { + "noop": {} + } + } + } + }, + "schedulers": { + "MAIN_SCHEDULER": { + "simple": { + "supported_platform_properties": { + "cpu_count": "minimum", + "memory_kb": "minimum", + "network_kbps": "minimum", + "disk_read_iops": "minimum", + "disk_read_bps": "minimum", + "disk_write_iops": "minimum", + "disk_write_bps": "minimum", + "shm_size": "minimum", + "gpu_count": "minimum", + "gpu_model": "exact", + "cpu_vendor": "exact", + "cpu_arch": "exact", + "cpu_model": "exact", + "kernel_version": "exact", + "OSFamily": "priority", + "container-image": "priority" + } + } + } + }, + "workers": [ + { + "local": { + "worker_api_endpoint": { + "uri": "grpc://127.0.0.1:50061" + }, + "cas_fast_slow_store": "WORKER_FAST_SLOW_STORE", + "upload_action_result": { + "ac_store": "AC_MAIN_STORE" + }, + "work_directory": "/tmp/nativelink/work", + "platform_properties": { + "cpu_count": { + "values": [ + "16" + ] + }, + "memory_kb": { + "values": [ + "500000" + ] + }, + "network_kbps": { + "values": [ + "100000" + ] + }, + "cpu_arch": { + "values": [ + "x86_64" + ] + }, + "OSFamily": { + "values": [ + "" + ] + }, + "container-image": { + "values": [ + "" + ] + } + } + } + } + ], + "servers": [ + { + "name": "public", + "listener": { + "http": { + "socket_address": "0.0.0.0:50051" + } + }, + "services": { + "cas": { + "main": { + "cas_store": "WORKER_FAST_SLOW_STORE" + } + }, + "ac": { + "main": { + "ac_store": "AC_MAIN_STORE" + } + }, + "execution": { + "main": { + "cas_store": "WORKER_FAST_SLOW_STORE", + "scheduler": "MAIN_SCHEDULER" + } + }, + "capabilities": { + "main": { + "remote_execution": { + "scheduler": "MAIN_SCHEDULER" + } + } + }, + "bytestream": { + "cas_stores": { + "main": "WORKER_FAST_SLOW_STORE" + } + } + } + }, + { + "name": "private_workers_servers", + "listener": { + "http": { + "socket_address": "0.0.0.0:50061" + } + }, + "services": { + "experimental_prometheus": { + "path": "/metrics" + }, + "experimental_bep": { + "store": "BEP_STORE" + }, + "worker_api": { + "scheduler": "MAIN_SCHEDULER" + }, + "admin": {}, + "health": { + "path": "/status" + } + } + } + ], + "global": { + "max_open_files": 512 + } +} diff --git a/nativelink-proto/google/devtools/build/v1/build_event_stream.proto b/nativelink-proto/google/devtools/build/v1/build_event_stream.proto new file mode 100644 index 
0000000000..c274f0dd8b --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/build_event_stream.proto @@ -0,0 +1,1440 @@ +// Copyright 2016 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// LINT: LEGACY_NAMES + +syntax = "proto3"; + +package build_event_stream; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "src/main/protobuf/action_cache.proto"; +import "src/main/protobuf/command_line.proto"; +import "src/main/protobuf/failure_details.proto"; +import "src/main/protobuf/invocation_policy.proto"; + +option java_package = "com.google.devtools.build.lib.buildeventstream"; +option java_outer_classname = "BuildEventStreamProtos"; + +// Identifier for a build event. It is deliberately structured to also provide +// information about which build target etc the event is related to. +// +// Events are chained via the event id as follows: each event has an id and a +// set of ids of children events such that apart from the initial event each +// event has an id that is mentioned as child id in an earlier event and a build +// invocation is complete if and only if all direct and indirect children of the +// initial event have been posted. +message BuildEventId { + // Generic identifier for a build event. This is the default type of + // BuildEventId, but should not be used outside testing; nevertheless, + // tools should handle build events with this kind of id gracefully. + message UnknownBuildEventId { + string details = 1; + } + + // Identifier of an event reporting progress. Those events are also used to + // chain in events that come early. + message ProgressId { + // Unique identifier. No assumption should be made about how the ids are + // assigned; the only meaningful operation on this field is test for + // equality. + int32 opaque_count = 1; + } + + // Identifier of an event indicating the beginning of a build; this will + // normally be the first event. + message BuildStartedId {} + + // Identifier on an event indicating the original commandline received by + // the bazel server. + message UnstructuredCommandLineId {} + + // Identifier on an event describing the commandline received by Bazel. + message StructuredCommandLineId { + // A title for this command line value, as there may be multiple. + // For example, a single invocation may wish to report both the literal and + // canonical command lines, and this label would be used to differentiate + // between both versions. + string command_line_label = 1; + } + + // Identifier of an event indicating the workspace status. + message WorkspaceStatusId {} + + // Identifier on an event reporting on the options included in the command + // line, both explicitly and implicitly. + message OptionsParsedId {} + + // Identifier of an event reporting that an external resource was fetched + // from. + message FetchId { + // The external resource that was fetched from. 
+ string url = 1; + } + + // Identifier of an event indicating that a target pattern has been expanded + // further. + // Messages of this shape are also used to describe parts of a pattern that + // have been skipped for some reason, if the actual expansion was still + // carried out (e.g., if keep_going is set). In this case, the + // pattern_skipped choice in the id field is to be made. + message PatternExpandedId { + repeated string pattern = 1; + } + + message WorkspaceConfigId {} + + message BuildMetadataId {} + + // Identifier of an event indicating that a target has been expanded by + // identifying for which configurations it should be build. + message TargetConfiguredId { + string label = 1; + + // If empty, the id refers to the expansion of the target. If not-empty, + // the id refers to the expansion of an aspect applied to the (already + // expanded) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetConfigured events for + // "//:foo_proto": + // + // 1. An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. + string aspect = 2; + } + + // Identifier of an event introducing a named set of files (usually artifacts) + // to be referred to in later messages. + message NamedSetOfFilesId { + // Identifier of the file set; this is an opaque string valid only for the + // particular instance of the event stream. + string id = 1; + } + + // Identifier of an event introducing a configuration. + message ConfigurationId { + // Identifier of the configuration; users of the protocol should not make + // any assumptions about it having any structure, or equality of the + // identifier between different streams. + // + // A value of "none" means the null configuration. It is used for targets + // that are not configurable, for example, source files. + string id = 1; + } + + // Identifier of an event indicating that a target was built completely; this + // does not include running the test if the target is a test target. + message TargetCompletedId { + string label = 1; + + // The configuration for which the target was built. + ConfigurationId configuration = 3; + + // If empty, the id refers to the completion of the target. If not-empty, + // the id refers to the completion of an aspect applied to the (already + // completed) target. + // + // For example, when building an apple_binary that depends on proto_library + // "//:foo_proto", there will be two TargetCompleted events for + // "//:foo_proto": + // + // 1. An event with an empty aspect, corresponding to actions producing + // language-agnostic outputs from the proto_library; and + // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C + // code generation. + string aspect = 2; + } + + // Identifier of an event reporting that an action was completed (not all + // actions are reported, only the ones that can be considered important; + // this includes all failed actions). + message ActionCompletedId { + string primary_output = 1; + // Optional, the label of the owner of the action, for reference. + string label = 2; + // Optional, the id of the configuration of the action owner. + ConfigurationId configuration = 3; + } + + // Identifier of an event reporting an event associated with an unconfigured + // label. 
Usually, this indicates a failure due to a missing input file. In + // any case, it will report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. The + // purpose of those events is to serve as the root cause of a failed target. + message UnconfiguredLabelId { + string label = 1; + } + + // Identifier of an event reporting an event associated with a configured + // label, usually a visibility error. In any case, an event with such an + // id will always report some form of error (i.e., the payload will be an + // Aborted event); there are no regular events using this identifier. + message ConfiguredLabelId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting on an individual test run. The label + // identifies the test that is reported about, the remaining fields are + // in such a way as to uniquely identify the action within a build. In fact, + // attempts for the same test, run, shard triple are counted sequentially, + // starting with 1. + message TestResultId { + string label = 1; + ConfigurationId configuration = 5; + int32 run = 2; + int32 shard = 3; + int32 attempt = 4; + } + + // Identifier of an event reporting progress of an individual test run. + message TestProgressId { + // The label of the target for the action. + string label = 1; + // The configuration under which the action is running. + ConfigurationId configuration = 2; + // The run number of the test action (e.g. for runs_per_test > 1). + int32 run = 3; + // For sharded tests, the shard number of the test action. + int32 shard = 4; + // The execution attempt number which may increase due to retries (e.g. for + // flaky tests). + int32 attempt = 5; + // An incrementing count used to differentiate TestProgressIds for the same + // test attempt. + int32 opaque_count = 6; + } + + // Identifier of an event reporting the summary of a test. + message TestSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of an event reporting the summary of a target. + message TargetSummaryId { + string label = 1; + ConfigurationId configuration = 2; + } + + // Identifier of the BuildFinished event, indicating the end of a build. + message BuildFinishedId {} + + // Identifier of an event providing additional logs/statistics after + // completion of the build. + message BuildToolLogsId {} + + // Identifier of an event providing build metrics after completion + // of the build. + message BuildMetricsId {} + + // Identifier of an event providing convenience symlinks information. + message ConvenienceSymlinksIdentifiedId {} + + // Identifier of an event providing the ExecRequest of a run command. 
+ message ExecRequestId {} + + reserved 27; + + oneof id { + UnknownBuildEventId unknown = 1; + ProgressId progress = 2; + BuildStartedId started = 3; + UnstructuredCommandLineId unstructured_command_line = 11; + StructuredCommandLineId structured_command_line = 18; + WorkspaceStatusId workspace_status = 14; + OptionsParsedId options_parsed = 12; + FetchId fetch = 17; + ConfigurationId configuration = 15; + TargetConfiguredId target_configured = 16; + PatternExpandedId pattern = 4; + PatternExpandedId pattern_skipped = 10; + NamedSetOfFilesId named_set = 13; + TargetCompletedId target_completed = 5; + ActionCompletedId action_completed = 6; + UnconfiguredLabelId unconfigured_label = 19; + ConfiguredLabelId configured_label = 21; + TestResultId test_result = 8; + TestProgressId test_progress = 29; + TestSummaryId test_summary = 7; + TargetSummaryId target_summary = 26; + BuildFinishedId build_finished = 9; + BuildToolLogsId build_tool_logs = 20; + BuildMetricsId build_metrics = 22; + WorkspaceConfigId workspace = 23; + BuildMetadataId build_metadata = 24; + ConvenienceSymlinksIdentifiedId convenience_symlinks_identified = 25; + ExecRequestId exec_request = 28; + } +} + +// Payload of an event summarizing the progress of the build so far. Those +// events are also used to be parents of events where the more logical parent +// event cannot be posted yet as the needed information is not yet complete. +message Progress { + // The next chunk of stdout that bazel produced since the last progress event + // or the beginning of the build. + // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stdout = 1; + + // The next chunk of stderr that bazel produced since the last progress event + // or the beginning of the build. + // Consumers that need to reason about the relative order of stdout and stderr + // can assume that stderr has been emitted before stdout if both are present, + // on a best-effort basis. + string stderr = 2; +} + +// Payload of an event indicating that an expected event will not come, as +// the build is aborted prematurely for some reason. +message Aborted { + enum AbortReason { + UNKNOWN = 0; + + // The user requested the build to be aborted (e.g., by hitting Ctl-C). + USER_INTERRUPTED = 1; + + // The user requested that no analysis be performed. + NO_ANALYZE = 8; + + // The user requested that no build be carried out. + NO_BUILD = 9; + + // The build or target was aborted as a timeout was exceeded. + TIME_OUT = 2; + + // The build or target was aborted as some remote environment (e.g., for + // remote execution of actions) was not available in the expected way. + REMOTE_ENVIRONMENT_FAILURE = 3; + + // Failure due to reasons entirely internal to the build tool, i.e. an + // unexpected crash due to programmer error. + INTERNAL = 4; + + // A Failure occurred in the loading phase of a target. + LOADING_FAILURE = 5; + + // A Failure occurred in the analysis phase of a target. + ANALYSIS_FAILURE = 6; + + // Target build was skipped (e.g. due to incompatible CPU constraints). + SKIPPED = 7; + + // Build incomplete due to an earlier build failure (e.g. --keep_going was + // set to false causing the build be ended upon failure). + INCOMPLETE = 10; + + // The build tool ran out of memory and crashed. 
+ OUT_OF_MEMORY = 11; + } + AbortReason reason = 1; + + // A human readable description with more details about there reason, where + // available and useful. + string description = 2; +} + +// Payload of an event indicating the beginning of a new build. Usually, events +// of those type start a new build-event stream. The target pattern requested +// to be build is contained in one of the announced child events; it is an +// invariant that precisely one of the announced child events has a non-empty +// target pattern. +message BuildStarted { + string uuid = 1; + + // Start of the build in ms since the epoch. + // + // Deprecated, use `start_time` instead. + // + // TODO(yannic): Remove. + int64 start_time_millis = 2 [deprecated = true]; + + // Start of the build. + google.protobuf.Timestamp start_time = 9; + + // Version of the build tool that is running. + string build_tool_version = 3; + + // A human-readable description of all the non-default option settings + string options_description = 4; + + // The name of the command that the user invoked. + string command = 5; + + // The working directory from which the build tool was invoked. + string working_directory = 6; + + // The directory of the workspace. + string workspace_directory = 7; + + // The process ID of the Bazel server. + int64 server_pid = 8; +} + +// Configuration related to the blaze workspace and output tree. +message WorkspaceConfig { + // The root of the local blaze exec root. All output files live underneath + // this at "blaze-out/". + string local_exec_root = 1; +} + +// Payload of an event reporting the command-line of the invocation as +// originally received by the server. Note that this is not the command-line +// given by the user, as the client adds information about the invocation, +// like name and relevant entries of rc-files and client environment variables. +// However, it does contain enough information to reproduce the build +// invocation. +message UnstructuredCommandLine { + repeated string args = 1; +} + +// Payload of an event reporting on the parsed options, grouped in various ways. +message OptionsParsed { + repeated string startup_options = 1; + repeated string explicit_startup_options = 2; + repeated string cmd_line = 3; + repeated string explicit_cmd_line = 4; + blaze.invocation_policy.InvocationPolicy invocation_policy = 5; + string tool_tag = 6; +} + +// Payload of an event indicating that an external resource was fetched. This +// event will only occur in streams where an actual fetch happened, not in ones +// where a cached copy of the entity to be fetched was used. +message Fetch { + bool success = 1; +} + +// Payload of an event reporting the workspace status. Key-value pairs can be +// provided by specifying the workspace_status_command to an executable that +// returns one key-value pair per line of output (key and value separated by a +// space). +message WorkspaceStatus { + message Item { + string key = 1; + string value = 2; + } + repeated Item item = 1; +} + +// Payload of an event reporting custom key-value metadata associated with the +// build. +message BuildMetadata { + // Custom metadata for the build. + map<string, string> metadata = 1 + ; +} + +// Payload of an event reporting details of a given configuration. +message Configuration { + string mnemonic = 1; + string platform_name = 2; + string cpu = 3; + map<string, string> make_variable = 4 + ; + // Whether this configuration is used for building tools. + bool is_tool = 5; +} + +// Payload of the event indicating the expansion of a target pattern.
+// The main information is in the chaining part: the id will contain the +// target pattern that was expanded and the children id will contain the +// target or target pattern it was expanded to. +message PatternExpanded { + // Represents a test_suite target and the tests that it expanded to. Nested + // test suites are recursively expanded. The test labels only contain the + // final test targets, not any nested suites. + message TestSuiteExpansion { + // The label of the test_suite rule. + string suite_label = 1; + // Labels of the test targets included in the suite. Includes all tests in + // the suite regardless of any filters or negative patterns which may result + // in the test not actually being run. + repeated string test_labels = 2; + } + + // All test suites requested via top-level target patterns. Does not include + // test suites whose label matched a negative pattern. + repeated TestSuiteExpansion test_suite_expansions = 1; +} + +// Enumeration type characterizing the size of a test, as specified by the +// test rule. +enum TestSize { + UNKNOWN = 0; + SMALL = 1; + MEDIUM = 2; + LARGE = 3; + ENORMOUS = 4; +} + +// Payload of the event indicating that the configurations for a target have +// been identified. As with pattern expansion the main information is in the +// chaining part: the id will contain the target that was configured and the +// children id will contain the configured targets it was configured to. +message TargetConfigured { + // The kind of target (e.g., e.g. "cc_library rule", "source file", + // "generated file") where the completion is reported. + string target_kind = 1; + + // The size of the test, if the target is a test target. Unset otherwise. + TestSize test_size = 2; + + // List of all tags associated with this target (for all possible + // configurations). + repeated string tag = 3; +} + +message File { + // A sequence of prefixes to apply to the file name to construct a full path. + // In most but not all cases, there will be 3 entries: + // 1. A root output directory, eg "bazel-out" + // 2. A configuration mnemonic, eg "k8-fastbuild" + // 3. An output category, eg "genfiles" + repeated string path_prefix = 4; + + // identifier indicating the nature of the file (e.g., "stdout", "stderr") + string name = 1; + + oneof file { + // A location where the contents of the file can be found. The string is + // encoded according to RFC2396. + string uri = 2; + // The contents of the file, if they are guaranteed to be short. + bytes contents = 3; + // The symlink target path, if the file is an unresolved symlink. + string symlink_target_path = 7; + } + + // Digest of the file, using the build tool's configured digest algorithm, + // hex-encoded. + string digest = 5; + + // Length of the file in bytes. + int64 length = 6; +} + +// Payload of a message to describe a set of files, usually build artifacts, to +// be referred to later by their name. In this way, files that occur identically +// as outputs of several targets have to be named only once. +message NamedSetOfFiles { + // Files that belong to this named set of files. + repeated File files = 1; + + // Other named sets whose members also belong to this set. + repeated BuildEventId.NamedSetOfFilesId file_sets = 2; +} + +// Payload of the event indicating the completion of an action. The main purpose +// of posting those events is to provide details on the root cause for a target +// failing; however, consumers of the build-event protocol must not assume +// that only failed actions are posted. 
+message ActionExecuted { + bool success = 1; + + // The mnemonic of the action that was executed + string type = 8; + + // The exit code of the action, if it is available. + int32 exit_code = 2; + + // Location where to find the standard output of the action + // (e.g., a file path). + File stdout = 3; + + // Location where to find the standard error of the action + // (e.g., a file path). + File stderr = 4; + + // Deprecated. This field is now present on ActionCompletedId. + string label = 5 [deprecated = true]; + + // Deprecated. This field is now present on ActionCompletedId. + BuildEventId.ConfigurationId configuration = 7 [deprecated = true]; + + // Primary output; only provided for successful actions. + File primary_output = 6; + + // The command-line of the action, if the action is a command. + repeated string command_line = 9; + + // List of paths to log files + repeated File action_metadata_logs = 10; + + // Only populated if success = false, and sometimes not even then. + failure_details.FailureDetail failure_detail = 11; + + // Start of action execution, before any attempted execution begins. + google.protobuf.Timestamp start_time = 12; + + // End of action execution, after all attempted execution completes. + google.protobuf.Timestamp end_time = 13; + + // Additional details about action execution supplied by strategies. Bazel + // options will determine which strategy details are included when multiple + // strategies are involved in a single action's execution. + // + // The default type will be `tools.proto.SpawnExec` found in `spawn.proto`. + repeated google.protobuf.Any strategy_details = 14; +} + +// Collection of all output files belonging to that output group. +message OutputGroup { + // Ids of fields that have been removed. + reserved 2; + + // Name of the output group + string name = 1; + + // List of file sets that belong to this output group as well. + repeated BuildEventId.NamedSetOfFilesId file_sets = 3; + + // Indicates that one or more of the output group's files were not built + // successfully (the generating action failed). + bool incomplete = 4; + + // Inlined files that belong to this output group, requested via + // --build_event_inline_output_groups. + repeated File inline_files = 5; +} + +// Payload of the event indicating the completion of a target. The target is +// specified in the id. If the target failed the root causes are provided as +// children events. +message TargetComplete { + bool success = 1; + + // The kind of target (e.g., e.g. "cc_library rule", "source file", + // "generated file") where the completion is reported. + // Deprecated: use the target_kind field in TargetConfigured instead. + string target_kind = 5 [deprecated = true]; + + // The size of the test, if the target is a test target. Unset otherwise. + // Deprecated: use the test_size field in TargetConfigured instead. + TestSize test_size = 6 [deprecated = true]; + + // The output files are arranged by their output group. If an output file + // is part of multiple output groups, it appears once in each output + // group. + repeated OutputGroup output_group = 2; + + // Temporarily, also report the important outputs directly. This is only to + // allow existing clients help transition to the deduplicated representation; + // new clients should not use it. + repeated File important_output = 4 [deprecated = true]; + + // Report output artifacts (referenced transitively via output_group) which + // emit directories instead of singleton files. 
These directory_output entries + // will never include a uri. + repeated File directory_output = 8; + + // List of tags associated with this configured target. + repeated string tag = 3; + + // The timeout specified for test actions under this configured target. + // + // Deprecated, use `test_timeout` instead. + // + // TODO(yannic): Remove. + int64 test_timeout_seconds = 7 [deprecated = true]; + + // The timeout specified for test actions under this configured target. + google.protobuf.Duration test_timeout = 10; + + // Failure information about the target, only populated if success is false, + // and sometimes not even then. Equal to one of the ActionExecuted + // failure_detail fields for one of the root cause ActionExecuted events. + failure_details.FailureDetail failure_detail = 9; +} + +enum TestStatus { + NO_STATUS = 0; + PASSED = 1; + FLAKY = 2; + TIMEOUT = 3; + FAILED = 4; + INCOMPLETE = 5; + REMOTE_FAILURE = 6; + FAILED_TO_BUILD = 7; + TOOL_HALTED_BEFORE_TESTING = 8; +} + +// Payload on events reporting about individual test action. +message TestResult { + reserved 1; + + // The status of this test. + TestStatus status = 5; + + // Additional details about the status of the test. This is intended for + // user display and must not be parsed. + string status_details = 9; + + // True, if the reported attempt is taken from the tool's local cache. + bool cached_locally = 4; + + // Time in milliseconds since the epoch at which the test attempt was started. + // Note: for cached test results, this is time can be before the start of the + // build. + // + // Deprecated, use `test_attempt_start` instead. + // + // TODO(yannic): Remove. + int64 test_attempt_start_millis_epoch = 6 [deprecated = true]; + + // Time at which the test attempt was started. + // Note: for cached test results, this is time can be before the start of the + // build. + google.protobuf.Timestamp test_attempt_start = 10; + + // Time the test took to run. For locally cached results, this is the time + // the cached invocation took when it was invoked. + // + // Deprecated, use `test_attempt_duration` instead. + // + // TODO(yannic): Remove. + int64 test_attempt_duration_millis = 3 [deprecated = true]; + + // Time the test took to run. For locally cached results, this is the time + // the cached invocation took when it was invoked. + google.protobuf.Duration test_attempt_duration = 11; + + // Files (logs, test.xml, undeclared outputs, etc) generated by that test + // action. + repeated File test_action_output = 2; + + // Warnings generated by that test action. + repeated string warning = 7; + + // Message providing optional meta data on the execution of the test action, + // if available. + message ExecutionInfo { + // Deprecated, use TargetComplete.test_timeout instead. + int32 timeout_seconds = 1 [deprecated = true]; + + // Name of the strategy to execute this test action (e.g., "local", + // "remote") + string strategy = 2; + + // True, if the reported attempt was a cache hit in a remote cache. + bool cached_remotely = 6; + + // The exit code of the test action. + int32 exit_code = 7; + + // The hostname of the machine where the test action was executed (in case + // of remote execution), if known. + string hostname = 3; + + // Represents a hierarchical timing breakdown of an activity. + // The top level time should be the total time of the activity. + // Invariant: `time` >= sum of `time`s of all direct children. 
+ message TimingBreakdown { + repeated TimingBreakdown child = 1; + string name = 2; + // Deprecated, use `time` instead. + // + // TODO(yannic): Remove. + int64 time_millis = 3 [deprecated = true]; + google.protobuf.Duration time = 4; + } + TimingBreakdown timing_breakdown = 4; + + message ResourceUsage { + string name = 1; + int64 value = 2; + } + repeated ResourceUsage resource_usage = 5; + } + ExecutionInfo execution_info = 8; +} + +// Event payload providing information about an active, individual test run. +message TestProgress { + // Identifies a resource that may provide information about an active test + // run. The resource is not necessarily a file and may need to be queried + // for information. The URI is not guaranteed to be available after the test + // completes. The string is encoded according to RFC2396. + string uri = 1; +} + +// Payload of the event summarizing a test. +message TestSummary { + // Wrapper around BlazeTestStatus to support importing that enum to proto3. + // Overall status of test, accumulated over all runs, shards, and attempts. + TestStatus overall_status = 5; + + // Total number of shard attempts. + // E.g., if a target has 4 runs, 3 shards, each with 2 attempts, + // then total_run_count will be 4*3*2 = 24. + int32 total_run_count = 1; + + // Value of runs_per_test for the test. + int32 run_count = 10; + + // Number of attempts. + // If there are a different number of attempts per shard, the highest attempt + // count across all shards for each run is used. + int32 attempt_count = 15; + + // Number of shards. + int32 shard_count = 11; + + // Path to logs of passed runs. + repeated File passed = 3; + + // Path to logs of failed runs; + repeated File failed = 4; + + // Total number of cached test actions + int32 total_num_cached = 6; + + // When the test first started running. + // + // Deprecated, use `first_start_time` instead. + // + // TODO(yannic): Remove. + int64 first_start_time_millis = 7 [deprecated = true]; + + // When the test first started running. + google.protobuf.Timestamp first_start_time = 13; + + // When the last test action completed. + // + // Deprecated, use `last_stop_time` instead. + // + // TODO(yannic): Remove. + int64 last_stop_time_millis = 8 [deprecated = true]; + + // When the test first started running. + google.protobuf.Timestamp last_stop_time = 14; + + // The total runtime of the test. + // + // Deprecated, use `total_run` instead. + // + // TODO(yannic): Remove. + int64 total_run_duration_millis = 9 [deprecated = true]; + + // The total runtime of the test. + google.protobuf.Duration total_run_duration = 12; +} + +// Payload of the event summarizing a target (test or non-test). +message TargetSummary { + // Conjunction of TargetComplete events for this target, including aspects. + bool overall_build_success = 1; + + // Repeats TestSummary's overall_status if available. + TestStatus overall_test_status = 2; +} + +// Event indicating the end of a build. +message BuildFinished { + // Exit code of a build. The possible values correspond to the predefined + // codes in bazel's lib.ExitCode class, as well as any custom exit code a + // module might define. The predefined exit codes are subject to change (but + // rarely do) and are not part of the public API. + // + // A build was successful iff ExitCode.code equals 0. + message ExitCode { + // The name of the exit code. + string name = 1; + + // The exit code. + int32 code = 2; + } + + // Things that happened during the build that could be of interest. 
+ message AnomalyReport { + // Was the build suspended at any time during the build. + // Examples of suspensions are SIGSTOP, or the hardware being put to sleep. + // If was_suspended is true, then most of the timings for this build are + // suspect. + // NOTE: This is no longer set and is deprecated. + bool was_suspended = 1; + } + + // If the build succeeded or failed. + bool overall_success = 1 [deprecated = true]; + + // The overall status of the build. A build was successful iff + // ExitCode.code equals 0. + ExitCode exit_code = 3; + + // End of the build in ms since the epoch. + // + // Deprecated, use `finish_time` instead. + // + // TODO(yannic): Remove. + int64 finish_time_millis = 2 [deprecated = true]; + + // End of the build. + google.protobuf.Timestamp finish_time = 5; + + AnomalyReport anomaly_report = 4 [deprecated = true]; + + // Only populated if success = false, and sometimes not even then. + failure_details.FailureDetail failure_detail = 6; +} + +message BuildMetrics { + message ActionSummary { + // The total number of actions created and registered during the build, + // including both aspects and configured targets. This metric includes + // unused actions that were constructed but not executed during this build. + // It does not include actions that were created on prior builds that are + // still valid, even if those actions had to be re-executed on this build. + // For the total number of actions that would be created if this invocation + // were "clean", see BuildGraphMetrics below. + int64 actions_created = 1; + + // The total number of actions created this build just by configured + // targets. Used mainly to allow consumers of actions_created, which used to + // not include aspects' actions, to normalize across the Blaze release that + // switched actions_created to include all created actions. + int64 actions_created_not_including_aspects = 3; + + // The total number of actions executed during the build. This includes any + // remote cache hits, but excludes local action cache hits. + int64 actions_executed = 2; + + message ActionData { + string mnemonic = 1; + + // The total number of actions of this type executed during the build. As + // above, includes remote cache hits but excludes local action cache hits. + int64 actions_executed = 2; + + // When the first action of this type started being executed, in + // milliseconds from the epoch. + int64 first_started_ms = 3; + + // When the last action of this type ended being executed, in + // milliseconds from the epoch. + int64 last_ended_ms = 4; + + // Accumulated CPU time of all spawned actions of this type. + // This is only set if all the actions reported a time + google.protobuf.Duration system_time = 5; + google.protobuf.Duration user_time = 6; + + // The total number of actions of this type registered during the build. + int64 actions_created = 7; + } + // Contains the top N actions by number of actions executed. + repeated ActionData action_data = 4; + + // Deprecated. The total number of remote cache hits. + int64 remote_cache_hits = 5 [deprecated = true]; + + message RunnerCount { + string name = 1; + int32 count = 2; + string exec_kind = 3; + } + repeated RunnerCount runner_count = 6; + + blaze.ActionCacheStatistics action_cache_statistics = 7; + } + ActionSummary action_summary = 1; + + message MemoryMetrics { + // Size of the JVM heap post build in bytes. This is only collected if + // --memory_profile is set, since it forces a full GC. 
+ int64 used_heap_size_post_build = 1; + + // Size of the peak JVM heap size in bytes post GC. Note that this reports 0 + // if there was no major GC during the build. + int64 peak_post_gc_heap_size = 2; + + // Size of the peak tenured space JVM heap size event in bytes post GC. Note + // that this reports 0 if there was no major GC during the build. + int64 peak_post_gc_tenured_space_heap_size = 4; + + message GarbageMetrics { + // Type of garbage collected, e.g. G1 Old Gen. + string type = 1; + // Number of bytes of garbage of the given type collected during this + // invocation. + int64 garbage_collected = 2; + } + + repeated GarbageMetrics garbage_metrics = 3; + } + MemoryMetrics memory_metrics = 2; + + message TargetMetrics { + // DEPRECATED + // No longer populated. It never measured what it was supposed to (targets + // loaded): it counted targets that were analyzed even if the underlying + // package had not changed. + // TODO(janakr): rename and remove. + int64 targets_loaded = 1; + + // Number of targets/aspects configured during this build. Does not include + // targets/aspects that were configured on prior builds on this server and + // were cached. See BuildGraphMetrics below if you need that. + int64 targets_configured = 2; + + // Number of configured targets analyzed during this build. Does not include + // aspects. Used mainly to allow consumers of targets_configured, which used + // to not include aspects, to normalize across the Blaze release that + // switched targets_configured to include aspects. + int64 targets_configured_not_including_aspects = 3; + } + TargetMetrics target_metrics = 3; + + message PackageMetrics { + // Number of BUILD files (aka packages) successfully loaded during this + // build. + // + // [For Bazel binaries built at source states] Before Dec 2021, this value + // was the number of packages attempted to be loaded, for a particular + // definition of "attempted". + // + // After Dec 2021, this value would sometimes overcount because the same + // package could sometimes be attempted to be loaded multiple times due to + // memory pressure. + // + // After Feb 2022, this value is the number of packages successfully + // loaded. + int64 packages_loaded = 1; + + // Loading time metrics per package. + repeated devtools.build.lib.packages.metrics.PackageLoadMetrics + package_load_metrics = 2; + } + PackageMetrics package_metrics = 4; + + message TimingMetrics { + // For Skymeld, it's possible that + // analysis_phase_time_in_ms + execution_phase_time_in_ms >= wall_time_in_ms + // + // The CPU time in milliseconds consumed during this build. + int64 cpu_time_in_ms = 1; + // The elapsed wall time in milliseconds during this build. + int64 wall_time_in_ms = 2; + // The elapsed wall time in milliseconds during the analysis phase. + // When analysis and execution phases are interleaved, this measures the + // elapsed time from the first analysis work to the last. + int64 analysis_phase_time_in_ms = 3; + // The elapsed wall time in milliseconds during the execution phase. + // When analysis and execution phases are interleaved, this measures the + // elapsed time from the first action execution (excluding workspace status + // actions) to the last. + int64 execution_phase_time_in_ms = 4; + + // The elapsed wall time in milliseconds until the first action execution + // started (excluding workspace status actions). 
+ int64 actions_execution_start_in_ms = 5; + } + TimingMetrics timing_metrics = 5; + + message CumulativeMetrics { + // One-indexed number of "analyses" the server has run, including the + // current one. Will be incremented for every build/test/cquery/etc. command + // that reaches the analysis phase. + int32 num_analyses = 11; + // One-indexed number of "builds" the server has run, including the current + // one. Will be incremented for every build/test/run/etc. command that + // reaches the execution phase. + int32 num_builds = 12; + } + + CumulativeMetrics cumulative_metrics = 6; + + message ArtifactMetrics { + reserved 1; + + message FilesMetric { + int64 size_in_bytes = 1; + int32 count = 2; + } + + // Measures all source files newly read this build. Does not include + // unchanged sources on incremental builds. + FilesMetric source_artifacts_read = 2; + // Measures all output artifacts from executed actions. This includes + // actions that were cached locally (via the action cache) or remotely (via + // a remote cache or executor), but does *not* include outputs of actions + // that were cached internally in Skyframe. + FilesMetric output_artifacts_seen = 3; + // Measures all output artifacts from actions that were cached locally + // via the action cache. These artifacts were already present on disk at the + // start of the build. Does not include Skyframe-cached actions' outputs. + FilesMetric output_artifacts_from_action_cache = 4; + // Measures all artifacts that belong to a top-level output group. Does not + // deduplicate, so if there are two top-level targets in this build that + // share an artifact, it will be counted twice. + FilesMetric top_level_artifacts = 5; + } + + ArtifactMetrics artifact_metrics = 7; + + // Data about the evaluation of Skyfunctions. + message EvaluationStat { + // Name of the Skyfunction. + string skyfunction_name = 1; + // How many times a given operation was carried out on a Skyfunction. + int64 count = 2; + } + + // Information about the size and shape of the build graph. Some fields may + // not be populated if Bazel was able to skip steps due to caching. + message BuildGraphMetrics { + // How many configured targets/aspects were in this build, including any + // that were analyzed on a prior build and are still valid. May not be + // populated if analysis phase was fully cached. Note: for historical + // reasons this includes input/output files and other configured targets + // that do not actually have associated actions. + int32 action_lookup_value_count = 1; + // How many configured targets alone were in this build: always at most + // action_lookup_value_count. Useful mainly for historical comparisons to + // TargetMetrics.targets_configured, which used to not count aspects. This + // also includes configured targets that do not have associated actions. + int32 action_lookup_value_count_not_including_aspects = 5; + // How many actions belonged to the configured targets/aspects above. It may + // not be necessary to execute all of these actions to build the requested + // targets. May not be populated if analysis phase was fully cached. + int32 action_count = 2; + // How many actions belonged to configured targets: always at most + // action_count. Useful mainly for historical comparisons to + // ActionMetrics.actions_created, which used to not count aspects' actions. + int32 action_count_not_including_aspects = 6; + // How many "input file" configured targets there were: one per source file. 
+ // Should agree with artifact_metrics.source_artifacts_read.count above, + int32 input_file_configured_target_count = 7; + // How many "output file" configured targets there were: output files that + // are targets (not implicit outputs). + int32 output_file_configured_target_count = 8; + // How many "other" configured targets there were (like alias, + // package_group, and other non-rule non-file configured targets). + int32 other_configured_target_count = 9; + // How many artifacts are outputs of the above actions. May not be populated + // if analysis phase was fully cached. + int32 output_artifact_count = 3; + // How many Skyframe nodes there are in memory at the end of the build. This + // may underestimate the number of nodes when running with memory-saving + // settings or with Skybuild, and may overestimate if there are nodes from + // prior evaluations still in the cache. + int32 post_invocation_skyframe_node_count = 4; + // Number of SkyValues that were dirtied during the build. Dirtied nodes are + // those that transitively depend on a node that changed by itself (e.g. one + // representing a file in the file system) + repeated EvaluationStat dirtied_values = 10; + // Number of SkyValues that changed by themselves. For example, when a file + // on the file system changes, the SkyValue representing it will change. + repeated EvaluationStat changed_values = 11; + // Number of SkyValues that were built. This means that they were evaluated + // and were found to have changed from their previous version. + repeated EvaluationStat built_values = 12; + // Number of SkyValues that were evaluated and found clean, i.e. equal to + // their previous version. + repeated EvaluationStat cleaned_values = 13; + + // For SkyKeys in 'done values' where the SkyValue is of type + // RuleConfiguredTargetValue, we pull those out separately and report the + // ruleClass and action count. + message RuleClassCount { + // Unique key for the rule class. + string key = 1; + + // String name of the rule_class (not guaranteed unique) + string rule_class = 2; + + // how many rule instances of this type were seen. + uint64 count = 3; + + // how many actions were created by this rule class. + uint64 action_count = 4; + } + repeated RuleClassCount rule_class = 14; + + // For SkyKeys whose function name is ASPECT break out that information + message AspectCount { + // Unique key for Aspect. + string key = 1; + + // usually the same as above, but can differ in some cases. + string aspect_name = 2; + + // number of aspects created of this type. + uint64 count = 3; + + // number of actions created by aspects of this type. + uint64 action_count = 4; + } + repeated AspectCount aspect = 15; + + // Removed due to overlap with EvaluationStat + reserved 16; + } + + BuildGraphMetrics build_graph_metrics = 8; + + // Information about all workers that were alive during the invocation. + message WorkerMetrics { + // Deprecated. Use worker_ids instead of this field. + int32 worker_id = 1 [deprecated = true]; + + // Ids of workers. Could be multiple in case of multiplex workers + repeated uint32 worker_ids = 8; + // Worker process id. If there is no process for worker, equals to zero. + uint32 process_id = 2; + // Mnemonic of running worker. + string mnemonic = 3; + // Multiplex or singleplex worker. + bool is_multiplex = 4; + // Using worker sandbox file system or not. + bool is_sandbox = 5; + // TODO(b/300067854): Deprecate since all worker metrics should have their + // WorkerStats set. 
+ bool is_measurable = 6; + // Hash value of worker key. Needed to distinguish worker pools with same + // menmonic but with different worker keys. + int64 worker_key_hash = 9; + + WorkerStatus worker_status = 10; + + enum WorkerStatus { + // Used to indicate a worker instance where the process has not been + // created yet. In reality this isn't logged, but leaving this here as a + // possible option in the future. + NOT_STARTED = 0; + ALIVE = 1; + KILLED_DUE_TO_MEMORY_PRESSURE = 2; + // Indicates that the worker process was killed due to a reason unknown to + // Bazel at the point of measurement; if a known cause (below) comes along + // later on, this field will be updated. + KILLED_UNKNOWN = 3; + KILLED_DUE_TO_INTERRUPTED_EXCEPTION = 4; + KILLED_DUE_TO_IO_EXCEPTION = 5; + KILLED_DUE_TO_USER_EXEC_EXCEPTION = 6; + } + + optional failure_details.Worker.Code code = 12; + + int64 actions_executed = 11; + + int64 prior_actions_executed = 13; + + // Information collected from worker at some point. + message WorkerStats { + // Epoch unix time of collection of metrics. + int64 collect_time_in_ms = 1; + // Memory usage of worker process at the end of the build. + int32 worker_memory_in_kb = 2; + // Memory usage of the worker process prior to the invocation. + int32 prior_worker_memory_in_kb = 4; + // Epoch unix time of last action started on specific worker. + int64 last_action_start_time_in_ms = 3; + } + + // Combined workers statistics. + repeated WorkerStats worker_stats = 7; + } + + repeated WorkerMetrics worker_metrics = 9; + + // Information about host network. + message NetworkMetrics { + // Information for all the network traffic going on on the host machine + // during the invocation. + message SystemNetworkStats { + // Total bytes sent during the invocation. + uint64 bytes_sent = 1; + // Total bytes received during the invocation. + uint64 bytes_recv = 2; + // Total packets sent during the invocation. + uint64 packets_sent = 3; + // Total packets received during the invocation. + uint64 packets_recv = 4; + // Peak bytes/sec sent during the invocation. + uint64 peak_bytes_sent_per_sec = 5; + // Peak bytes/sec received during the invocation. + uint64 peak_bytes_recv_per_sec = 6; + // Peak packets/sec sent during the invocation. + uint64 peak_packets_sent_per_sec = 7; + // Peak packets/sec received during the invocation. + uint64 peak_packets_recv_per_sec = 8; + } + + SystemNetworkStats system_network_stats = 1; + } + + NetworkMetrics network_metrics = 10; + + // Information about worker pool actions. + message WorkerPoolMetrics { + // Statistics of worker pool per worker pool hash. Basically it's a map from + // worker pool hash to statistics. + repeated WorkerPoolStats worker_pool_stats = 1; + + message WorkerPoolStats { + // Hash of worker pool these stats are for. Contains information about + // startup flags. + int32 hash = 1; + // Mnemonic of workers these stats are for. + string mnemonic = 2; + // Number of workers created during a build. + int64 created_count = 3; + // Number of workers destroyed during a build (sum of all workers + // destroyed by eviction, UserExecException, IoException, + // InterruptedException and unknown reasons below). + int64 destroyed_count = 4; + // Number of workers evicted during a build. + int64 evicted_count = 5; + // Number of workers destroyed due to UserExecExceptions. + int64 user_exec_exception_destroyed_count = 6; + // Number of workers destroyed due to IoExceptions. 
+ int64 io_exception_destroyed_count = 7; + // Number of workers destroyed due to InterruptedExceptions. + int64 interrupted_exception_destroyed_count = 8; + // Number of workers destroyed due to an unknown reason. + int64 unknown_destroyed_count = 9; + // Number of workers alive at the end of the build. + int64 alive_count = 10; + } + } + + WorkerPoolMetrics worker_pool_metrics = 11; + + // Information about dynamic execution. + message DynamicExecutionMetrics { + message RaceStatistics { + // Mnemonic of the action. + string mnemonic = 1; + // Name of runner of local branch. + string local_runner = 2; + // Name of runner of remote branch. + string remote_runner = 3; + // Number of wins of local branch in race. + int32 local_wins = 4; + // Number of wins of remote branch in race. + int32 remote_wins = 5; + } + // Race statistics grouped by mnemonic, local_name, remote_name. + repeated RaceStatistics race_statistics = 1; + } + + DynamicExecutionMetrics dynamic_execution_metrics = 12; +} + +// Event providing additional statistics/logs after completion of the build. +message BuildToolLogs { + repeated File log = 1; +} + +// Event describing all convenience symlinks (i.e., workspace symlinks) to be +// created or deleted once the execution phase has begun. Note that this event +// does not say anything about whether or not the build tool actually executed +// these filesystem operations; it only says what logical operations should be +// performed. This event is emitted exactly once per build; if no symlinks are +// to be modified, the event is still emitted with empty contents. +message ConvenienceSymlinksIdentified { + repeated ConvenienceSymlink convenience_symlinks = 1; +} + +// The message that contains what type of action to perform on a given path and +// target of a symlink. +message ConvenienceSymlink { + enum Action { + UNKNOWN = 0; + + // Indicates a symlink should be created, or overwritten if it already + // exists. + CREATE = 1; + + // Indicates a symlink should be deleted if it already exists. + DELETE = 2; + } + + // The path of the symlink to be created or deleted, absolute or relative to + // the workspace, creating any directories necessary. If a symlink already + // exists at that location, then it should be replaced by a symlink pointing + // to the new target. + string path = 1; + + // The operation we are performing on the symlink. + Action action = 2; + + // If action is CREATE, this is the target path (relative to the output base) + // that the symlink should point to. + // + // If action is DELETE, this field is not set. + string target = 3; +} + +// Event that contains the ExecRequest of a run command announced only after a +// successful build and before trying to execute the requested command-line. +message ExecRequestConstructed { + bytes working_directory = 1; + repeated bytes argv = 2; + repeated EnvironmentVariable environment_variable = 3; + repeated bytes environment_variable_to_clear = 4; + bool should_exec = 5; +} + +// An environment variable provided by a run command after a successful build. +message EnvironmentVariable { + bytes name = 1; + bytes value = 2; +} + +// Message describing a build event. Events will have an identifier that +// is unique within a given build invocation; they also announce follow-up +// events as children. More details, which are specific to the kind of event +// that is observed, is provided in the payload. More options for the payload +// might be added in the future. 
+message BuildEvent { + reserved 11, 19; + BuildEventId id = 1; + repeated BuildEventId children = 2; + bool last_message = 20; + oneof payload { + Progress progress = 3; + Aborted aborted = 4; + BuildStarted started = 5; + UnstructuredCommandLine unstructured_command_line = 12; + command_line.CommandLine structured_command_line = 22; + OptionsParsed options_parsed = 13; + WorkspaceStatus workspace_status = 16; + Fetch fetch = 21; + Configuration configuration = 17; + PatternExpanded expanded = 6; + TargetConfigured configured = 18; + ActionExecuted action = 7; + NamedSetOfFiles named_set_of_files = 15; + TargetComplete completed = 8; + TestResult test_result = 10; + TestProgress test_progress = 30; + TestSummary test_summary = 9; + TargetSummary target_summary = 28; + BuildFinished finished = 14; + BuildToolLogs build_tool_logs = 23; + BuildMetrics build_metrics = 24; + WorkspaceConfig workspace_info = 25; + BuildMetadata build_metadata = 26; + ConvenienceSymlinksIdentified convenience_symlinks_identified = 27; + ExecRequestConstructed exec_request = 29; + } +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/annotations.proto b/nativelink-proto/google/devtools/build/v1/google/api/annotations.proto new file mode 100644 index 0000000000..85c361b47f --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/client.proto b/nativelink-proto/google/devtools/build/v1/google/api/client.proto new file mode 100644 index 0000000000..2102623d30 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/client.proto @@ -0,0 +1,99 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/field_behavior.proto b/nativelink-proto/google/devtools/build/v1/google/api/field_behavior.proto new file mode 100644 index 0000000000..2865ba0537 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/field_behavior.proto @@ -0,0 +1,104 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052 [packed = false]; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; + + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. 
+ // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + IDENTIFIER = 8; +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/google/api/http.proto b/nativelink-proto/google/devtools/build/v1/google/api/google/api/http.proto new file mode 100644 index 0000000000..3fd2902a05 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/google/api/http.proto @@ -0,0 +1,388 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. 
+// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// ```text +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// ``` +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// ```text +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// ``` +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A&param=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// ```text +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// ``` +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// ```text +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// ``` +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!"
}` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// ```proto +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// ``` +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// ```text +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// ``` +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. 
+// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// ```yaml +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// ``` +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. 
+ string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/google/protobuf/descriptor.proto b/nativelink-proto/google/devtools/build/v1/google/api/google/protobuf/descriptor.proto new file mode 100644 index 0000000000..9f0ce6cde0 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/google/protobuf/descriptor.proto @@ -0,0 +1,909 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. 
Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. 
+ // + // When proto3_optional is true, this field must belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages.
+// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. 
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire.
Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. 
For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/nativelink-proto/google/devtools/build/v1/google/api/http.proto b/nativelink-proto/google/devtools/build/v1/google/api/http.proto new file mode 100644 index 0000000000..3fd2902a05 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/google/api/http.proto @@ -0,0 +1,388 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. 
Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// ```text +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// ``` +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// ```text +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// ``` +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// ```text +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// ``` +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// ```text +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// ``` +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// ```proto +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// ``` +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. 
+// +// ### Path template syntax +// +// ```text +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// ``` +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// ```yaml +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// ``` +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. 
As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/action_cache.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/action_cache.proto new file mode 100644 index 0000000000..a31cba599e --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/action_cache.proto @@ -0,0 +1,63 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package blaze; + +option java_package = "com.google.devtools.build.lib.actions.cache"; +option java_outer_classname = "Protos"; + +// Information about the action cache behavior during a single build. +message ActionCacheStatistics { + // Size of the action cache in bytes. + // + // This is computed by the code that persists the action cache to disk and + // represents the size of the written files, which has no direct relation to + // the number of entries in the cache. + uint64 size_in_bytes = 1; + + // Time it took to save the action cache to disk. + uint64 save_time_in_ms = 2; + + // Reasons for not finding an action in the cache. + enum MissReason { + DIFFERENT_ACTION_KEY = 0; + DIFFERENT_DEPS = 1; + DIFFERENT_ENVIRONMENT = 2; + DIFFERENT_FILES = 3; + CORRUPTED_CACHE_ENTRY = 4; + NOT_CACHED = 5; + UNCONDITIONAL_EXECUTION = 6; + } + + // Detailed information for a particular miss reason. + message MissDetail { + MissReason reason = 1; + int32 count = 2; + } + + // Cache counters. + int32 hits = 3; + int32 misses = 4; + + // Breakdown of the cache misses based on the reasons behind them. + repeated MissDetail miss_details = 5; + + // Time it took to load the action cache from disk. Reported as 0 if the + // action cache has not been loaded in this invocation. + uint64 load_time_in_ms = 6; + + // NEXT TAG: 7 +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/command_line.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/command_line.proto new file mode 100644 index 0000000000..d5fa6aceb8 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/command_line.proto @@ -0,0 +1,102 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package command_line; + +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +import "src/main/protobuf/option_filters.proto"; + +// Representation of a Bazel command line. +message CommandLine { + // A title for this command line value, to differentiate it from others. + // In particular, a single invocation may wish to report both the literal and + // canonical command lines, and this label would be used to differentiate + // between both versions. This is a string for flexibility. + string command_line_label = 1; + + // A Bazel command line is made of distinct parts. 
For example, + // `bazel --nomaster_bazelrc test --nocache_test_results //foo:aTest` + // has the executable "bazel", a startup flag, a command "test", a command + // flag, and a test target. There could be many more flags and targets, or + // none (`bazel info` for example), but the basic structure is there. The + // command line should be broken down into these logical sections here. + repeated CommandLineSection sections = 2; +} + +// A section of the Bazel command line. +message CommandLineSection { + // The name of this section, such as "startup_option" or "command". + string section_label = 1; + + oneof section_type { + // Sections with non-options, such as the list of targets or the command, + // should use simple string chunks. + ChunkList chunk_list = 2; + + // Startup and command options are lists of options and belong here. + OptionList option_list = 3; + } +} + +// Wrapper to allow a list of strings in the "oneof" section_type. +message ChunkList { + repeated string chunk = 1; +} + +// Wrapper to allow a list of options in the "oneof" section_type. +message OptionList { + repeated Option option = 1; +} + +// A single command line option. +// +// This represents the option itself, but does not take into account the type of +// option or how the parser interpreted it. If this option is part of a command +// line that represents the actual input that Bazel received, it would, for +// example, include expansion flags as they are. However, if this option +// represents the canonical form of the command line, with the values as Bazel +// understands them, then the expansion flag, which has no value, would not +// appear, and the flags it expands to would. +message Option { + // How the option looks with the option and its value combined. Depending on + // the purpose of this command line report, this could be the canonical + // form, or the way that the flag was set. + // + // Some examples: this might be `--foo=bar` form, or `--foo bar` with a space; + // for boolean flags, `--nobaz` is accepted on top of `--baz=false` and other + // negating values, or for a positive value, the unqualified `--baz` form + // is also accepted. This could also be a short `-b`, if the flag has an + // abbreviated form. + string combined_form = 1; + + // The canonical name of the option, without the preceding dashes. + string option_name = 2; + + // The value of the flag, or unset for flags that do not take values. + // Especially for boolean flags, this should be in canonical form, the + // combined_form field above gives room for showing the flag as it was set + // if that is preferred. + string option_value = 3; + + // This flag's tagged effects. See OptionEffectTag's java documentation for + // details. + repeated options.OptionEffectTag effect_tags = 4; + + // Metadata about the flag. See OptionMetadataTag's java documentation for + // details. + repeated options.OptionMetadataTag metadata_tags = 5; +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/failure_details.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/failure_details.proto new file mode 100644 index 0000000000..a6078f507e --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/failure_details.proto @@ -0,0 +1,1364 @@ +// Copyright 2020 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file's messages describe any failure(s) that occurred during Bazel's +// handling of a request. The intent is to provide more detail to a Bazel client +// than is conveyed with an exit code, to help those clients decide how to +// respond to, or classify, a failure. + +syntax = "proto3"; + +package failure_details; + +option java_package = "com.google.devtools.build.lib.server"; + +import "google/protobuf/descriptor.proto"; + +message FailureDetailMetadata { + uint32 exit_code = 1; +} + + extend google.protobuf.EnumValueOptions { + FailureDetailMetadata metadata = 1078; +} + +// The FailureDetail message type is designed such that consumers can extract a +// basic classification of a FailureDetail message even if the consumer was +// built with a stale definition. This forward compatibility is implemented via +// conventions on FailureDetail and its submessage types, as follows. +// +// *** FailureDetail field numbers +// +// Field numbers 1 through 100 (inclusive) are reserved for generally applicable +// values. Any number of these fields may be set on a FailureDetail message. +// +// Field numbers 101 through 10,000 (inclusive) are reserved for use inside the +// "oneof" structure. Only one of these values should be set on a FailureDetail +// message. +// +// Additional fields numbers are unlikely to be needed, but, for extreme future- +// proofing purposes, field numbers 10,001 through 1,000,000 (inclusive; +// excluding protobuf's reserved range 19000 through 19999) are reserved for +// additional generally applicable values. +// +// *** FailureDetail's "oneof" submessages +// +// Each field in the "oneof" structure is a submessage corresponding to a +// category of failure. +// +// In each of these submessage types, field number 1 is an enum whose values +// correspond to a subcategory of the failure. Generally, the enum's constant +// which maps to 0 should be interpreted as "unspecified", though this is not +// required. +// +// *** Recommended forward compatibility strategy +// +// The recommended forward compatibility strategy is to reduce a FailureDetail +// message to a pair of integers. +// +// The first integer corresponds to the field number of the submessage set +// inside FailureDetail's "oneof", which corresponds with the failure's +// category. +// +// The second integer corresponds to the value of the enum at field number 1 +// within that submessage, which corresponds with the failure's subcategory. +// +// WARNING: This functionality is experimental and should not be relied on at +// this time. +// TODO(mschaller): remove experimental warning +message FailureDetail { + // A short human-readable message describing the failure, for debugging. + // + // This value is *not* intended to be used algorithmically. + string message = 1; + + // Reserved for future generally applicable values. Any of these may be set. 
+ reserved 2 to 100; + + oneof category { + Interrupted interrupted = 101; + ExternalRepository external_repository = 103; + BuildProgress build_progress = 104; + RemoteOptions remote_options = 106; + ClientEnvironment client_environment = 107; + Crash crash = 108; + SymlinkForest symlink_forest = 110; + PackageOptions package_options = 114; + RemoteExecution remote_execution = 115; + Execution execution = 116; + Workspaces workspaces = 117; + CrashOptions crash_options = 118; + Filesystem filesystem = 119; + ExecutionOptions execution_options = 121; + Command command = 122; + Spawn spawn = 123; + GrpcServer grpc_server = 124; + CanonicalizeFlags canonicalize_flags = 125; + BuildConfiguration build_configuration = 126; + InfoCommand info_command = 127; + MemoryOptions memory_options = 129; + Query query = 130; + LocalExecution local_execution = 132; + ActionCache action_cache = 134; + FetchCommand fetch_command = 135; + SyncCommand sync_command = 136; + Sandbox sandbox = 137; + IncludeScanning include_scanning = 139; + TestCommand test_command = 140; + ActionQuery action_query = 141; + TargetPatterns target_patterns = 142; + CleanCommand clean_command = 144; + ConfigCommand config_command = 145; + ConfigurableQuery configurable_query = 146; + DumpCommand dump_command = 147; + HelpCommand help_command = 148; + MobileInstall mobile_install = 150; + ProfileCommand profile_command = 151; + RunCommand run_command = 152; + VersionCommand version_command = 153; + PrintActionCommand print_action_command = 154; + WorkspaceStatus workspace_status = 158; + JavaCompile java_compile = 159; + ActionRewinding action_rewinding = 160; + CppCompile cpp_compile = 161; + StarlarkAction starlark_action = 162; + NinjaAction ninja_action = 163; + DynamicExecution dynamic_execution = 164; + FailAction fail_action = 166; + SymlinkAction symlink_action = 167; + CppLink cpp_link = 168; + LtoAction lto_action = 169; + TestAction test_action = 172; + Worker worker = 173; + Analysis analysis = 174; + PackageLoading package_loading = 175; + Toolchain toolchain = 177; + StarlarkLoading starlark_loading = 179; + ExternalDeps external_deps = 181; + DiffAwareness diff_awareness = 182; + ModCommand mod_command = 183; + BuildReport build_report = 184; + Skyfocus skyfocus = 185; + } + + reserved 102; // For internal use + reserved 105; // For internal use + reserved 109; // For internal use + reserved 111 to 113; // For internal use + reserved 120; // For internal use + reserved 128; // For internal use + reserved 131; // For internal use + reserved 133; // For internal use + reserved 138; // For internal use + reserved 143; // For internal use + reserved 149; // For internal use + reserved 155 to 157; // For internal use + reserved 165; // For internal use + reserved 170 to 171; // For internal use + reserved 176; // For internal use + reserved 178; // For internal use + reserved 180; // For internal use +} + +message Interrupted { + enum Code { + // Unknown interrupt. Avoid using this code, instead use INTERRUPTED. + INTERRUPTED_UNKNOWN = 0 [(metadata) = { exit_code: 8 }]; + + // Command was interrupted (cancelled). + INTERRUPTED = 28 [(metadata) = { exit_code: 8 }]; + + // The following more specific interrupt codes have been deprecated and + // consolidated into INTERRUPTED. 
+ DEPRECATED_BUILD = 4 [(metadata) = { exit_code: 8 }]; + DEPRECATED_BUILD_COMPLETION = 5 [(metadata) = { exit_code: 8 }]; + DEPRECATED_PACKAGE_LOADING_SYNC = 6 [(metadata) = { exit_code: 8 }]; + DEPRECATED_EXECUTOR_COMPLETION = 7 [(metadata) = { exit_code: 8 }]; + DEPRECATED_COMMAND_DISPATCH = 8 [(metadata) = { exit_code: 8 }]; + DEPRECATED_INFO_ITEM = 9 [(metadata) = { exit_code: 8 }]; + DEPRECATED_AFTER_QUERY = 10 [(metadata) = { exit_code: 8 }]; + DEPRECATED_FETCH_COMMAND = 17 [(metadata) = { exit_code: 8 }]; + DEPRECATED_SYNC_COMMAND = 18 [(metadata) = { exit_code: 8 }]; + DEPRECATED_CLEAN_COMMAND = 20 [(metadata) = { exit_code: 8 }]; + DEPRECATED_MOBILE_INSTALL_COMMAND = 21 [(metadata) = { exit_code: 8 }]; + DEPRECATED_QUERY = 22 [(metadata) = { exit_code: 8 }]; + DEPRECATED_RUN_COMMAND = 23 [(metadata) = { exit_code: 8 }]; + DEPRECATED_OPTIONS_PARSING = 27 [(metadata) = { exit_code: 8 }]; + + reserved 1 to 3; // For internal use + reserved 11 to 16; // For internal use + reserved 19; // For internal use + reserved 24 to 26; // For internal use + } + + Code code = 1; +} + +message Spawn { + enum Code { + SPAWN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + // See the SpawnResult.Status Java enum for definitions of the following + // Spawn failure codes. + NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }]; + TIMEOUT = 2 [(metadata) = { exit_code: 1 }]; + // Note: Spawn OUT_OF_MEMORY leads to a BUILD_FAILURE exit_code because the + // build tool itself did not run out of memory. + OUT_OF_MEMORY = 3 [(metadata) = { exit_code: 1 }]; + EXECUTION_FAILED = 4 [(metadata) = { exit_code: 34 }]; + EXECUTION_DENIED = 5 [(metadata) = { exit_code: 1 }]; + REMOTE_CACHE_FAILED = 6 [(metadata) = { exit_code: 34 }]; + COMMAND_LINE_EXPANSION_FAILURE = 7 [(metadata) = { exit_code: 1 }]; + EXEC_IO_EXCEPTION = 8 [(metadata) = { exit_code: 36 }]; + INVALID_TIMEOUT = 9 [(metadata) = { exit_code: 1 }]; + INVALID_REMOTE_EXECUTION_PROPERTIES = 10 [(metadata) = { exit_code: 1 }]; + NO_USABLE_STRATEGY_FOUND = 11 [(metadata) = { exit_code: 1 }]; + // TODO(b/138456686): this code should be deprecated when SpawnResult is + // refactored to prohibit undetailed failures + UNSPECIFIED_EXECUTION_FAILURE = 12 [(metadata) = { exit_code: 1 }]; + FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }]; + // This also includes other remote cache errors, not just evictions, + // if --incompatible_remote_use_new_exit_code_for_lost_inputs is set. + // TODO: Rename it to a more general name when + // --experimental_remote_cache_eviction_retries is moved to + // non-experimental. + REMOTE_CACHE_EVICTED = 14 [(metadata) = { exit_code: 39 }]; + SPAWN_LOG_IO_EXCEPTION = 15 [(metadata) = { exit_code: 36 }]; + } + Code code = 1; + + // For Codes describing generic failure to spawn (eg. EXECUTION_FAILED and + // EXECUTION_DENIED) the `catastrophic` field may be set to true indicating a + // failure that immediately terminated the entire build tool. + bool catastrophic = 2; + + // If Code is NON_ZERO_EXIT, the `spawn_exit_code` field may be set to the + // non-zero exit code returned by the spawned process to the OS. + // + // NOTE: This field must not be confused with the build tool's overall + // exit code. 
+ int32 spawn_exit_code = 3; +} + +message ExternalRepository { + enum Code { + EXTERNAL_REPOSITORY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES = 1 [(metadata) = { exit_code: 2 }]; + BAD_DOWNLOADER_CONFIG = 2 [(metadata) = { exit_code: 2 }]; + REPOSITORY_MAPPING_RESOLUTION_FAILED = 3 [(metadata) = { exit_code: 37 }]; + CREDENTIALS_INIT_FAILURE = 4 [(metadata) = { exit_code: 2 }]; + } + Code code = 1; + // Additional data could include external repository names. +} + +message BuildProgress { + enum Code { + BUILD_PROGRESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OUTPUT_INITIALIZATION = 3 [(metadata) = { exit_code: 36 }]; + BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED = 4 [(metadata) = { exit_code: 2 }]; + BES_LOCAL_WRITE_ERROR = 5 [(metadata) = { exit_code: 36 }]; + BES_INITIALIZATION_ERROR = 6 [(metadata) = { exit_code: 36 }]; + BES_UPLOAD_TIMEOUT_ERROR = 7 [(metadata) = { exit_code: 38 }]; + BES_FILE_WRITE_TIMEOUT = 8 [(metadata) = { exit_code: 38 }]; + BES_FILE_WRITE_IO_ERROR = 9 [(metadata) = { exit_code: 38 }]; + BES_FILE_WRITE_INTERRUPTED = 10 [(metadata) = { exit_code: 38 }]; + BES_FILE_WRITE_CANCELED = 11 [(metadata) = { exit_code: 38 }]; + BES_FILE_WRITE_UNKNOWN_ERROR = 12 [(metadata) = { exit_code: 38 }]; + BES_UPLOAD_LOCAL_FILE_ERROR = 13 [(metadata) = { exit_code: 38 }]; + BES_STREAM_NOT_RETRYING_FAILURE = 14 [(metadata) = { exit_code: 45 }]; + BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR = 15 + [(metadata) = { exit_code: 45 }]; + BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR = 16 + [(metadata) = { exit_code: 45 }]; + BES_STREAM_COMPLETED_WITH_REMOTE_ERROR = 19 + [(metadata) = { exit_code: 45 }]; + BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE = 17 + [(metadata) = { exit_code: 38 }]; + reserved 1, 2, 18, 20; // For internal use + } + Code code = 1; + // Additional data could include the build progress upload endpoint. +} + +message RemoteOptions { + enum Code { + REMOTE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR = 1 + [(metadata) = { exit_code: 2 }]; + // Credentials could not be read from the requested file/socket/process/etc. + CREDENTIALS_READ_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + // Credentials could not be written to a shared, temporary file. + CREDENTIALS_WRITE_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + DOWNLOADER_WITHOUT_GRPC_CACHE = 4 [(metadata) = { exit_code: 2 }]; + EXECUTION_WITH_INVALID_CACHE = 5 [(metadata) = { exit_code: 2 }]; + + reserved 6; + } + + Code code = 1; +} + +message ClientEnvironment { + enum Code { + CLIENT_ENVIRONMENT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CLIENT_CWD_MALFORMED = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message Crash { + enum Code { + CRASH_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CRASH_OOM = 1 [(metadata) = { exit_code: 33 }]; + } + + Code code = 1; + + // The cause chain of the crash, with the outermost throwable first. Limited + // to the outermost exception and at most 4 nested causes (so, max size of 5). + repeated Throwable causes = 2; + + // True when the root cause of the crash was not an OutOfMemoryError, but + // CRASH_OOM was chosen because an OutOfMemoryError was detected prior to the + // crash. + bool oom_detector_override = 3; +} + +message Throwable { + // The class name of the java.lang.Throwable. + string throwable_class = 1; + // The throwable's message. + string message = 2; + // The result of calling toString on the deepest (i.e. 
closest to the + // throwable's construction site) 1000 (or fewer) StackTraceElements. + // Unstructured to simplify string matching. + repeated string stack_trace = 3; +} + +message SymlinkForest { + enum Code { + SYMLINK_FOREST_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT = 1 [(metadata) = { exit_code: 2 }]; + TOPLEVEL_OUTDIR_USED_AS_SOURCE = 2 [(metadata) = { exit_code: 2 }]; + CREATION_FAILED = 3 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message BuildReport { + enum Code { + BUILD_REPORT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS = 1 + [(metadata) = { exit_code: 36 }]; + BUILD_REPORT_WRITE_FAILED = 2 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; + // Additional data for partial failures might include the build report that + // failed to be written. +} + +// Failure details for errors produced when using Skyfocus +message Skyfocus { + enum Code { + // The defined working set cannot be used for the focused targets. For + // example, this happens when the intersection of the working set and the + // transitive closure of the focused target is empty. + INVALID_WORKING_SET = 0 [(metadata) = { exit_code: 2 }]; + // The user needs to augment their working set to include the new file(s). + NON_WORKING_SET_CHANGE = 1 [(metadata) = { exit_code: 2 }]; + CONFIGURATION_CHANGE = 2 [(metadata) = { exit_code: 2 }]; + DISALLOWED_OPERATION_ON_FOCUSED_GRAPH = 3 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message PackageOptions { + enum Code { + reserved 2, 3; // For internal use + + PACKAGE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PACKAGE_PATH_INVALID = 1 [(metadata) = { exit_code: 2 }]; + NONSINGLETON_PACKAGE_PATH = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message RemoteExecution { + // The association of some of these options with exit code 2, "command line + // error", seems sketchy. Especially worth reconsidering are the channel init + // failure modes, which can correspond to failures occurring in gRPC setup. + // These all correspond with current Bazel behavior. 
+ enum Code { + REMOTE_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CAPABILITIES_QUERY_FAILURE = 1 [(metadata) = { exit_code: 34 }]; + CREDENTIALS_INIT_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + CACHE_INIT_FAILURE = 3 [(metadata) = { exit_code: 2 }]; + RPC_LOG_FAILURE = 4 [(metadata) = { exit_code: 2 }]; + EXEC_CHANNEL_INIT_FAILURE = 5 [(metadata) = { exit_code: 2 }]; + CACHE_CHANNEL_INIT_FAILURE = 6 [(metadata) = { exit_code: 2 }]; + DOWNLOADER_CHANNEL_INIT_FAILURE = 7 [(metadata) = { exit_code: 2 }]; + LOG_DIR_CLEANUP_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + CLIENT_SERVER_INCOMPATIBLE = 9 [(metadata) = { exit_code: 34 }]; + DOWNLOADED_INPUTS_DELETION_FAILURE = 10 [(metadata) = { exit_code: 34 }]; + REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD = 11 + [(metadata) = { exit_code: 2 }]; + REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS = 12 + [(metadata) = { exit_code: 2 }]; + INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE = 13 + [(metadata) = { exit_code: 36 }]; + REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE = 14 + [(metadata) = { exit_code: 1 }]; + ILLEGAL_OUTPUT = 15 [(metadata) = { exit_code: 1 }]; + INVALID_EXEC_AND_PLATFORM_PROPERTIES = 16 [(metadata) = { exit_code: 1 }]; + TOPLEVEL_OUTPUTS_DOWNLOAD_FAILURE = 17 [(metadata) = { exit_code: 34 }]; + } + + Code code = 1; +} + +message Execution { + enum Code { + EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXECUTION_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + EXECUTION_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + EXECROOT_CREATION_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 5 + [(metadata) = { exit_code: 36 }]; + PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 6 + [(metadata) = { exit_code: 36 }]; + LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + reserved 8; // was ACTION_INPUT_FILES_MISSING, now mostly + // SOURCE_INPUT_MISSING + LOCAL_TEMPLATE_EXPANSION_FAILURE = 9 [(metadata) = { exit_code: 36 }]; + INPUT_DIRECTORY_CHECK_IO_EXCEPTION = 10 [(metadata) = { exit_code: 36 }]; + EXTRA_ACTION_OUTPUT_CREATION_FAILURE = 11 [(metadata) = { exit_code: 36 }]; + TEST_RUNNER_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }]; + FILE_WRITE_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }]; + TEST_OUT_ERR_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION = 15 + [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION = 16 + [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_CREATION_IO_EXCEPTION = 17 [(metadata) = { exit_code: 36 }]; + SYMLINK_TREE_CREATION_COMMAND_EXCEPTION = 18 + [(metadata) = { exit_code: 36 }]; + ACTION_INPUT_READ_IO_EXCEPTION = 19 [(metadata) = { exit_code: 36 }]; + ACTION_NOT_UP_TO_DATE = 20 [(metadata) = { exit_code: 1 }]; + PSEUDO_ACTION_EXECUTION_PROHIBITED = 21 [(metadata) = { exit_code: 1 }]; + DISCOVERED_INPUT_DOES_NOT_EXIST = 22 [(metadata) = { exit_code: 36 }]; + ACTION_OUTPUTS_DELETION_FAILURE = 23 [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUTS_NOT_CREATED = 24 [(metadata) = { exit_code: 1 }]; + ACTION_FINALIZATION_FAILURE = 25 [(metadata) = { exit_code: 1 }]; + ACTION_INPUT_LOST = 26 [(metadata) = { exit_code: 1 }]; + FILESYSTEM_CONTEXT_UPDATE_FAILURE = 27 [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUT_CLOSE_FAILURE = 28 [(metadata) = { exit_code: 1 }]; + INPUT_DISCOVERY_IO_EXCEPTION = 29 
[(metadata) = { exit_code: 1 }]; + TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE = 30 + [(metadata) = { exit_code: 1 }]; + ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 31 + [(metadata) = { exit_code: 1 }]; + ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE = 32 + [(metadata) = { exit_code: 1 }]; + ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE = 33 + [(metadata) = { exit_code: 1 }]; + NON_ACTION_EXECUTION_FAILURE = 34 [(metadata) = { exit_code: 1 }]; + CYCLE = 35 [(metadata) = { exit_code: 1 }]; + SOURCE_INPUT_MISSING = 36 [(metadata) = { exit_code: 1 }]; + UNEXPECTED_EXCEPTION = 37 [(metadata) = { exit_code: 1 }]; + reserved 38; + SOURCE_INPUT_IO_EXCEPTION = 39 [(metadata) = { exit_code: 1 }]; + SYMLINK_TREE_DELETION_IO_EXCEPTION = 40 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +// Failure details about Bazel's WORKSPACE features. +message Workspaces { + enum Code { + WORKSPACES_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + WORKSPACES_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + WORKSPACES_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + + // See `managed_directories` in + // https://bazel.build/rules/lib/globals#workspace. + ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES = 3 + [(metadata) = { exit_code: 1 }]; + WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES = 4 + [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message CrashOptions { + enum Code { + CRASH_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + reserved 1; // For internal use + } + + Code code = 1; +} + +message Filesystem { + enum Code { + FILESYSTEM_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + reserved 1; + reserved 2; + EMBEDDED_BINARIES_ENUMERATION_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + SERVER_PID_TXT_FILE_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + SERVER_FILE_WRITE_FAILURE = 5 [(metadata) = { exit_code: 36 }]; + DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE = 6 + [(metadata) = { exit_code: 2 }]; + FILESYSTEM_JNI_NOT_AVAILABLE = 8 [(metadata) = { exit_code: 36 }]; + + reserved 7, 9, 10; // For internal use + } + + Code code = 1; +} + +message ExecutionOptions { + // All numerical exit code associations correspond to pre-existing Bazel + // behavior. These associations are suspicious: + // - REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING (instead: 2?) + // - DEPRECATED_LOCAL_RESOURCES_USED (instead: 2?) + // TODO(b/138456686): Revise these after the (intentionally non-breaking) + // initial rollout of FailureDetail-based encoding. + enum Code { + EXECUTION_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_STRATEGY = 3 [(metadata) = { exit_code: 2 }]; + REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING = 4 + [(metadata) = { exit_code: 36 }]; + DEPRECATED_LOCAL_RESOURCES_USED = 5 [(metadata) = { exit_code: 36 }]; + INVALID_CYCLIC_DYNAMIC_STRATEGY = 6 [(metadata) = { exit_code: 36 }]; + RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT = 7 [(metadata) = { exit_code: 2 }]; + REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN = 8 + [(metadata) = { exit_code: 2 }]; + STRATEGY_NOT_FOUND = 9 [(metadata) = { exit_code: 2 }]; + DYNAMIC_STRATEGY_NOT_SANDBOXED = 10 [(metadata) = { exit_code: 2 }]; + MULTIPLE_EXECUTION_LOG_FORMATS = 11 [(metadata) = { exit_code: 2 }]; + + reserved 1, 2; // For internal use + } + + Code code = 1; +} + +message Command { + enum Code { + // The name "COMMAND_UNKNOWN" might reasonably be interpreted as "command + // not found". The enum's default value should represent a lack of knowledge + // about the failure instead. 
+ COMMAND_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_NOT_FOUND = 1 [(metadata) = { exit_code: 2 }]; + ANOTHER_COMMAND_RUNNING = 2 [(metadata) = { exit_code: 9 }]; + PREVIOUSLY_SHUTDOWN = 3 [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE = 5 + [(metadata) = { exit_code: 36 }]; + STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE = 6 + [(metadata) = { exit_code: 36 }]; + INVOCATION_POLICY_PARSE_FAILURE = 7 [(metadata) = { exit_code: 2 }]; + INVOCATION_POLICY_INVALID = 8 [(metadata) = { exit_code: 2 }]; + OPTIONS_PARSE_FAILURE = 9 [(metadata) = { exit_code: 2 }]; + STARLARK_OPTIONS_PARSE_FAILURE = 10 [(metadata) = { exit_code: 2 }]; + ARGUMENTS_NOT_RECOGNIZED = 11 [(metadata) = { exit_code: 2 }]; + NOT_IN_WORKSPACE = 12 [(metadata) = { exit_code: 2 }]; + SPACES_IN_WORKSPACE_PATH = 13 [(metadata) = { exit_code: 36 }]; + IN_OUTPUT_DIRECTORY = 14 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message GrpcServer { + enum Code { + GRPC_SERVER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + GRPC_SERVER_NOT_COMPILED_IN = 1 [(metadata) = { exit_code: 37 }]; + SERVER_BIND_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + BAD_COOKIE = 3 [(metadata) = { exit_code: 36 }]; + NO_CLIENT_DESCRIPTION = 4 [(metadata) = { exit_code: 36 }]; + reserved 5; // For internal use + } + + Code code = 1; +} + +message CanonicalizeFlags { + enum Code { + CANONICALIZE_FLAGS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + FOR_COMMAND_INVALID = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +// Failure modes described by this category pertain to the Bazel invocation +// configuration consumed by Bazel's analysis phase. This category is not +// intended as a grab-bag for all Bazel flag value constraint violations, which +// instead generally belong in the category for the subsystem whose flag values +// participate in the constraint. +message BuildConfiguration { + enum Code { + BUILD_CONFIGURATION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PLATFORM_MAPPING_EVALUATION_FAILURE = 1 [(metadata) = { exit_code: 2 }]; + PLATFORM_MAPPINGS_FILE_IS_DIRECTORY = 2 [(metadata) = { exit_code: 1 }]; + PLATFORM_MAPPINGS_FILE_NOT_FOUND = 3 [(metadata) = { exit_code: 1 }]; + TOP_LEVEL_CONFIGURATION_CREATION_FAILURE = 4 + [(metadata) = { exit_code: 1 }]; + INVALID_CONFIGURATION = 5 [(metadata) = { exit_code: 2 }]; + INVALID_BUILD_OPTIONS = 6 [(metadata) = { exit_code: 2 }]; + MULTI_CPU_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }]; + HEURISTIC_INSTRUMENTATION_FILTER_INVALID = 8 + [(metadata) = { exit_code: 2 }]; + CYCLE = 9 [(metadata) = { exit_code: 2 }]; + CONFLICTING_CONFIGURATIONS = 10 [(metadata) = { exit_code: 2 }]; + // This can come from either an invalid user-specified option or a + // configuration transition. There's no sure-fire way to distinguish the two + // possibilities in Bazel, so we go with the more straightforward + // command-line error exit code 2. + INVALID_OUTPUT_DIRECTORY_MNEMONIC = 11 [(metadata) = { exit_code: 2 }]; + CONFIGURATION_DISCARDED_ANALYSIS_CACHE = 12 [(metadata) = { exit_code: 2 }]; + // Failure modes specific to PROJECT.scl files. + INVALID_PROJECT = 13 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message InfoCommand { + // The distinction between a failure to write a single info item and a failure + // to write them all seems sketchy. Why do they have different exit codes? 
+ // This reflects current Bazel behavior, but deserves more thought. + enum Code { + INFO_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOO_MANY_KEYS = 1 [(metadata) = { exit_code: 2 }]; + KEY_NOT_RECOGNIZED = 2 [(metadata) = { exit_code: 2 }]; + INFO_BLOCK_WRITE_FAILURE = 3 [(metadata) = { exit_code: 7 }]; + ALL_INFO_WRITE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message MemoryOptions { + enum Code { + MEMORY_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + // Deprecated: validation is now implemented by the option converter. + DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE = 1 + [(metadata) = { exit_code: 2 }, deprecated = true]; + // Deprecated: no tenured collectors found is now a crash on startup. + DEPRECATED_EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND = 2 + [(metadata) = { exit_code: 2 }, deprecated = true]; + } + + Code code = 1; +} + +message Query { + enum Code { + QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION = 1 [(metadata) = { exit_code: 2 }]; + QUERY_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }]; + OUTPUT_FORMAT_INVALID = 4 [(metadata) = { exit_code: 2 }]; + GRAPHLESS_PREREQ_UNMET = 5 [(metadata) = { exit_code: 2 }]; + QUERY_OUTPUT_WRITE_FAILURE = 6 [(metadata) = { exit_code: 36 }]; + QUERY_STDOUT_FLUSH_FAILURE = 13 [(metadata) = { exit_code: 36 }]; + ANALYSIS_QUERY_PREREQ_UNMET = 14 [(metadata) = { exit_code: 2 }]; + QUERY_RESULTS_FLUSH_FAILURE = 15 [(metadata) = { exit_code: 36 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR = 16 + [(metadata) = { exit_code: 2 }]; + VARIABLE_NAME_INVALID = 17 [(metadata) = { exit_code: 7 }]; + VARIABLE_UNDEFINED = 18 [(metadata) = { exit_code: 7 }]; + BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR = 19 + [(metadata) = { exit_code: 2 }]; + BUILD_FILE_ERROR = 20 [(metadata) = { exit_code: 7 }]; + CYCLE = 21 [(metadata) = { exit_code: 7 }]; + UNIQUE_SKYKEY_THRESHOLD_EXCEEDED = 22 [(metadata) = { exit_code: 7 }]; + TARGET_NOT_IN_UNIVERSE_SCOPE = 23 [(metadata) = { exit_code: 2 }]; + INVALID_FULL_UNIVERSE_EXPRESSION = 24 [(metadata) = { exit_code: 7 }]; + UNIVERSE_SCOPE_LIMIT_EXCEEDED = 25 [(metadata) = { exit_code: 7 }]; + INVALIDATION_LIMIT_EXCEEDED = 26 [(metadata) = { exit_code: 7 }]; + OUTPUT_FORMAT_PREREQ_UNMET = 27 [(metadata) = { exit_code: 2 }]; + ARGUMENTS_MISSING = 28 [(metadata) = { exit_code: 7 }]; + RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY = 29 [(metadata) = { exit_code: 7 }]; + FULL_TARGETS_NOT_SUPPORTED = 30 [(metadata) = { exit_code: 7 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_UNEXPECTED_TOKEN_ERROR = 31 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INTEGER_LITERAL_MISSING = 32 [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_INVALID_STARTING_CHARACTER_ERROR = 33 + [(metadata) = { exit_code: 2 }]; + // Deprecated - folded into SYNTAX_ERROR. + DEPRECATED_PREMATURE_END_OF_INPUT_ERROR = 34 + [(metadata) = { exit_code: 2 }]; + // Indicates the user specified invalid query syntax. 
+ SYNTAX_ERROR = 35 [(metadata) = { exit_code: 2 }]; + OUTPUT_FORMATTER_IO_EXCEPTION = 36 [(metadata) = { exit_code: 36 }]; + SKYQUERY_TRANSITIVE_TARGET_ERROR = 37 [(metadata) = { exit_code: 7 }]; + SKYQUERY_TARGET_EXCEPTION = 38 [(metadata) = { exit_code: 7 }]; + INVALID_LABEL_IN_TEST_SUITE = 39 [(metadata) = { exit_code: 7 }]; + // Indicates any usage of flags that must not be combined. + ILLEGAL_FLAG_COMBINATION = 40 [(metadata) = { exit_code: 2 }]; + // Indicates a non-detailed exception that halted a query. This is a + // deficiency in Blaze/Bazel and code should be changed to attach a detailed + // exit code to this failure mode. + NON_DETAILED_ERROR = 41 [(metadata) = { exit_code: 1 }]; + + reserved 7 to 12; // For internal use + } + + Code code = 1; +} + +message LocalExecution { + enum Code { + LOCAL_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOCKFREE_OUTPUT_PREREQ_UNMET = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ActionCache { + enum Code { + ACTION_CACHE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message FetchCommand { + enum Code { + FETCH_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + OPTIONS_INVALID = 2 [(metadata) = { exit_code: 2 }]; + QUERY_PARSE_ERROR = 3 [(metadata) = { exit_code: 2 }]; + QUERY_EVALUATION_ERROR = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message SyncCommand { + enum Code { + SYNC_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + PACKAGE_LOOKUP_ERROR = 1 [(metadata) = { exit_code: 7 }]; + WORKSPACE_EVALUATION_ERROR = 2 [(metadata) = { exit_code: 7 }]; + REPOSITORY_FETCH_ERRORS = 3 [(metadata) = { exit_code: 7 }]; + REPOSITORY_NAME_INVALID = 4 [(metadata) = { exit_code: 7 }]; + } + + Code code = 1; +} + +message Sandbox { + enum Code { + SANDBOX_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + EXECUTION_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + DOCKER_COMMAND_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + NO_DOCKER_IMAGE = 4 [(metadata) = { exit_code: 1 }]; + DOCKER_IMAGE_PREPARATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + BIND_MOUNT_ANALYSIS_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_DOES_NOT_EXIST = 7 [(metadata) = { exit_code: 1 }]; + MOUNT_SOURCE_TARGET_TYPE_MISMATCH = 8 [(metadata) = { exit_code: 1 }]; + MOUNT_TARGET_DOES_NOT_EXIST = 9 [(metadata) = { exit_code: 1 }]; + SUBPROCESS_START_FAILED = 10 [(metadata) = { exit_code: 36 }]; + FORBIDDEN_INPUT = 11 [(metadata) = { exit_code: 1 }]; + COPY_INPUTS_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }]; + COPY_OUTPUTS_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message IncludeScanning { + enum Code { + INCLUDE_SCANNING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INITIALIZE_INCLUDE_HINTS_ERROR = 1 [(metadata) = { exit_code: 36 }]; + SCANNING_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_FILE_NOT_IN_PACKAGE = 3 [(metadata) = { exit_code: 36 }]; + INCLUDE_HINTS_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + ILLEGAL_ABSOLUTE_PATH = 5 [(metadata) = { exit_code: 1 }]; + // TODO(b/166268889): this code should be deprecated in favor of more finely + // resolved loading-phase codes. 
+ PACKAGE_LOAD_FAILURE = 6 [(metadata) = { exit_code: 1 }];
+ USER_PACKAGE_LOAD_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+ SYSTEM_PACKAGE_LOAD_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+ UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE = 9 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+ PackageLoading.Code package_loading_code = 2;
+}
+
+message TestCommand {
+ enum Code {
+ TEST_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ NO_TEST_TARGETS = 1 [(metadata) = { exit_code: 4 }];
+ TEST_WITH_NOANALYZE = 2 [(metadata) = { exit_code: 1 }];
+ TESTS_FAILED = 3 [(metadata) = { exit_code: 3 }];
+ }
+
+ Code code = 1;
+}
+
+message ActionQuery {
+ // All numerical exit code associations correspond to pre-existing Bazel
+ // behavior. These associations are suspicious:
+ // - COMMAND_LINE_EXPANSION_FAILURE: this is associated with 2, the numerical
+ // exit code for "bad Bazel command line", but is generated when an
+ // action's command line fails to expand, which sounds similar but is
+ // completely different.
+ // - OUTPUT_FAILURE: this is associated with 6, an undocumented exit code.
+ // - INVALID_AQUERY_EXPRESSION: this is associated with 1, which is not
+ // documented for (a)query.
+ // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+ // initial rollout of FailureDetail-based encoding.
+ enum Code {
+ ACTION_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ COMMAND_LINE_EXPANSION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+ OUTPUT_FAILURE = 2 [(metadata) = { exit_code: 6 }];
+ COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+ EXPRESSION_PARSE_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+ SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION = 5
+ [(metadata) = { exit_code: 2 }];
+ INVALID_AQUERY_EXPRESSION = 6 [(metadata) = { exit_code: 1 }];
+ SKYFRAME_STATE_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+ AQUERY_OUTPUT_TOO_BIG = 8 [(metadata) = { exit_code: 7 }];
+ ILLEGAL_PATTERN_SYNTAX = 9 [(metadata) = { exit_code: 2 }];
+ INCORRECT_ARGUMENTS = 10 [(metadata) = { exit_code: 2 }];
+ TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED = 11
+ [(metadata) = { exit_code: 2 }];
+ SKYFRAME_STATE_AFTER_EXECUTION = 12 [(metadata) = { exit_code: 1 }];
+ LABELS_FUNCTION_NOT_SUPPORTED = 13 [(metadata) = { exit_code: 2 }];
+ TEMPLATE_EXPANSION_FAILURE = 14 [(metadata) = { exit_code: 2 }];
+ }
+
+ Code code = 1;
+}
+
+message TargetPatterns {
+ enum Code {
+ TARGET_PATTERNS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN = 1
+ [(metadata) = { exit_code: 2 }];
+ TARGET_PATTERN_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+ TARGET_PATTERN_PARSE_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+ PACKAGE_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }];
+ TARGET_FORMAT_INVALID = 5 [(metadata) = { exit_code: 1 }];
+ ABSOLUTE_TARGET_PATTERN_INVALID = 6 [(metadata) = { exit_code: 1 }];
+ CANNOT_DETERMINE_TARGET_FROM_FILENAME = 7 [(metadata) = { exit_code: 1 }];
+ LABEL_SYNTAX_ERROR = 8 [(metadata) = { exit_code: 1 }];
+ TARGET_CANNOT_BE_EMPTY_STRING = 9 [(metadata) = { exit_code: 1 }];
+ PACKAGE_PART_CANNOT_END_IN_SLASH = 10 [(metadata) = { exit_code: 1 }];
+ CYCLE = 11 [(metadata) = { exit_code: 1 }];
+ CANNOT_PRELOAD_TARGET = 12 [(metadata) = { exit_code: 1 }];
+ TARGETS_MISSING = 13 [(metadata) = { exit_code: 1 }];
+ RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED = 14 [(metadata) = { exit_code: 1 }];
+ UP_LEVEL_REFERENCES_NOT_ALLOWED = 15 [(metadata) = { exit_code: 1 }];
+
NEGATIVE_TARGET_PATTERN_NOT_ALLOWED = 16 [(metadata) = { exit_code: 1 }]; + TARGET_MUST_BE_A_FILE = 17 [(metadata) = { exit_code: 1 }]; + DEPENDENCY_NOT_FOUND = 18 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 19 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message CleanCommand { + enum Code { + CLEAN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OUTPUT_SERVICE_CLEAN_FAILURE = 1 [(metadata) = { exit_code: 6 }]; + ACTION_CACHE_CLEAN_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + OUT_ERR_CLOSE_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_DELETE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + OUTPUT_BASE_TEMP_MOVE_FAILURE = 5 [(metadata) = { exit_code: 36 }]; + ASYNC_OUTPUT_BASE_DELETE_FAILURE = 6 [(metadata) = { exit_code: 6 }]; + EXECROOT_DELETE_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + EXECROOT_TEMP_MOVE_FAILURE = 8 [(metadata) = { exit_code: 36 }]; + ASYNC_EXECROOT_DELETE_FAILURE = 9 [(metadata) = { exit_code: 6 }]; + ARGUMENTS_NOT_RECOGNIZED = 10 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigCommand { + enum Code { + CONFIG_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TOO_MANY_CONFIG_IDS = 1 [(metadata) = { exit_code: 2 }]; + CONFIGURATION_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message ConfigurableQuery { + enum Code { + CONFIGURABLE_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_LINE_EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }]; + EXPRESSION_PARSE_FAILURE = 2 [(metadata) = { exit_code: 2 }]; + FILTERS_NOT_SUPPORTED = 3 [(metadata) = { exit_code: 2 }]; + BUILDFILES_FUNCTION_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 2 }]; + SIBLINGS_FUNCTION_NOT_SUPPORTED = 5 [(metadata) = { exit_code: 2 }]; + VISIBLE_FUNCTION_NOT_SUPPORTED = 6 [(metadata) = { exit_code: 2 }]; + ATTRIBUTE_MISSING = 7 [(metadata) = { exit_code: 2 }]; + INCORRECT_CONFIG_ARGUMENT_ERROR = 8 [(metadata) = { exit_code: 2 }]; + TARGET_MISSING = 9 [(metadata) = { exit_code: 2 }]; + STARLARK_SYNTAX_ERROR = 10 [(metadata) = { exit_code: 2 }]; + STARLARK_EVAL_ERROR = 11 [(metadata) = { exit_code: 2 }]; + // Indicates failure to correctly define a format function + FORMAT_FUNCTION_ERROR = 12 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message DumpCommand { + enum Code { + DUMP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_OUTPUT_SPECIFIED = 1 [(metadata) = { exit_code: 7 }]; + ACTION_CACHE_DUMP_FAILED = 2 [(metadata) = { exit_code: 7 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 7 }]; + ACTION_GRAPH_DUMP_FAILED = 4 [(metadata) = { exit_code: 7 }]; + STARLARK_HEAP_DUMP_FAILED = 5 [(metadata) = { exit_code: 8 }]; + reserved 6; // For internal use + SKYFRAME_MEMORY_DUMP_FAILED = 7 [(metadata) = { exit_code: 7 }]; + SERIALIZED_FRONTIER_PROFILE_FAILED = 8 [(metadata) = { exit_code: 7 }]; + } + + Code code = 1; +} + +message HelpCommand { + enum Code { + HELP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENT = 1 [(metadata) = { exit_code: 2 }]; + COMMAND_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message MobileInstall { + enum Code { + MOBILE_INSTALL_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CLASSIC_UNSUPPORTED = 1 [(metadata) = { exit_code: 2 }]; + NO_TARGET_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + MULTIPLE_TARGETS_SPECIFIED = 3 [(metadata) = { exit_code: 2 }]; + TARGET_TYPE_INVALID = 4 [(metadata) = { exit_code: 6 }]; + NON_ZERO_EXIT = 5 [(metadata) = { 
exit_code: 6 }]; + ERROR_RUNNING_PROGRAM = 6 [(metadata) = { exit_code: 6 }]; + } + + Code code = 1; +} + +message ProfileCommand { + enum Code { + PROFILE_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + OLD_BINARY_FORMAT_UNSUPPORTED = 1 [(metadata) = { exit_code: 1 }]; + FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message RunCommand { + enum Code { + RUN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_TARGET_SPECIFIED = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TARGETS_SPECIFIED = 2 [(metadata) = { exit_code: 2 }]; + TARGET_NOT_EXECUTABLE = 3 [(metadata) = { exit_code: 2 }]; + TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE = 4 [(metadata) = { exit_code: 1 }]; + TARGET_BUILT_BUT_PATH_VALIDATION_FAILED = 5 + [(metadata) = { exit_code: 36 }]; + RUN_UNDER_TARGET_NOT_BUILT = 6 [(metadata) = { exit_code: 2 }]; + RUN_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }]; + TOO_MANY_TEST_SHARDS_OR_RUNS = 8 [(metadata) = { exit_code: 2 }]; + TEST_ENVIRONMENT_SETUP_FAILURE = 9 [(metadata) = { exit_code: 36 }]; + COMMAND_LINE_EXPANSION_FAILURE = 10 [(metadata) = { exit_code: 36 }]; + NO_SHELL_SPECIFIED = 11 [(metadata) = { exit_code: 2 }]; + SCRIPT_WRITE_FAILURE = 12 [(metadata) = { exit_code: 6 }]; + RUNFILES_DIRECTORIES_CREATION_FAILURE = 13 [(metadata) = { exit_code: 36 }]; + RUNFILES_SYMLINKS_CREATION_FAILURE = 14 [(metadata) = { exit_code: 36 }]; + TEST_ENVIRONMENT_SETUP_INTERRUPTED = 15 [(metadata) = { exit_code: 8 }]; + } + + Code code = 1; +} + +message VersionCommand { + enum Code { + VERSION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NOT_AVAILABLE = 1 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message PrintActionCommand { + enum Code { + PRINT_ACTION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + TARGET_NOT_FOUND = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + TARGET_KIND_UNSUPPORTED = 3 [(metadata) = { exit_code: 1 }]; + ACTIONS_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message WorkspaceStatus { + enum Code { + WORKSPACE_STATUS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }]; + ABNORMAL_TERMINATION = 2 [(metadata) = { exit_code: 1 }]; + EXEC_FAILED = 3 [(metadata) = { exit_code: 1 }]; + PARSE_FAILURE = 4 [(metadata) = { exit_code: 36 }]; + VALIDATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + CONTENT_UPDATE_IO_EXCEPTION = 6 [(metadata) = { exit_code: 1 }]; + STDERR_IO_EXCEPTION = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message JavaCompile { + enum Code { + JAVA_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + REDUCED_CLASSPATH_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + JDEPS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 36 }]; + REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE = 4 + [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ActionRewinding { + enum Code { + ACTION_REWINDING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOST_INPUT_TOO_MANY_TIMES = 1 [(metadata) = { exit_code: 1 }]; + REWIND_LOST_INPUTS_PREREQ_UNMET = 3 [(metadata) = { exit_code: 2 }]; + LOST_OUTPUT_TOO_MANY_TIMES = 4 [(metadata) = { exit_code: 1 }]; + LOST_INPUT_REWINDING_DISABLED = 5 [(metadata) = { exit_code: 1 }]; + LOST_OUTPUT_REWINDING_DISABLED = 6 [(metadata) = { exit_code: 1 }]; + // Deprecated: attempting to rewind a source artifact is now a hard 
crash. + DEPRECATED_LOST_INPUT_IS_SOURCE = 2 + [(metadata) = { exit_code: 1 }, deprecated = true]; + } + + Code code = 1; +} + +message CppCompile { + enum Code { + CPP_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + FIND_USED_HEADERS_IO_EXCEPTION = 1 [(metadata) = { exit_code: 36 }]; + COPY_OUT_ERR_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + D_FILE_READ_FAILURE = 3 [(metadata) = { exit_code: 36 }]; + COMMAND_GENERATION_FAILURE = 4 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_TIMEOUT = 5 [(metadata) = { exit_code: 1 }]; + INCLUDE_PATH_OUTSIDE_EXEC_ROOT = 6 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 7 [(metadata) = { exit_code: 1 }]; + UNDECLARED_INCLUSIONS = 8 [(metadata) = { exit_code: 1 }]; + D_FILE_PARSE_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + COVERAGE_NOTES_CREATION_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + MODULE_EXPANSION_MISSING_DATA = 11 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkAction { + enum Code { + STARLARK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + UNUSED_INPUT_LIST_READ_FAILURE = 1 [(metadata) = { exit_code: 36 }]; + UNUSED_INPUT_LIST_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message NinjaAction { + enum Code { + NINJA_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_DEPFILE_DECLARED_DEPENDENCY = 1 [(metadata) = { exit_code: 36 }]; + D_FILE_PARSE_FAILURE = 2 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message DynamicExecution { + enum Code { + DYNAMIC_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + XCODE_RELATED_PREREQ_UNMET = 1 [(metadata) = { exit_code: 36 }]; + ACTION_LOG_MOVE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + RUN_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + NO_USABLE_STRATEGY_FOUND = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} + +message FailAction { + enum Code { + FAIL_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INTENTIONAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + INCORRECT_PYTHON_VERSION = 2 [(metadata) = { exit_code: 1 }]; + PROGUARD_SPECS_MISSING = 3 [(metadata) = { exit_code: 1 }]; + DYNAMIC_LINKING_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 1 }]; + SOURCE_FILES_MISSING = 5 [(metadata) = { exit_code: 1 }]; + INCORRECT_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + FRAGMENT_CLASS_MISSING = 7 [(metadata) = { exit_code: 1 }]; + reserved 8, 9; // For internal use + CANT_BUILD_INCOMPATIBLE_TARGET = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message SymlinkAction { + enum Code { + SYMLINK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + EXECUTABLE_INPUT_NOT_FILE = 1 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_IS_NOT = 2 [(metadata) = { exit_code: 1 }]; + EXECUTABLE_INPUT_CHECK_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + LINK_CREATION_IO_EXCEPTION = 4 [(metadata) = { exit_code: 1 }]; + LINK_TOUCH_IO_EXCEPTION = 5 [(metadata) = { exit_code: 1 }]; + LINK_LOG_IO_EXCEPTION = 6 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message CppLink { + enum Code { + CPP_LINK_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + COMMAND_GENERATION_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + FAKE_COMMAND_GENERATION_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message LtoAction { + enum Code { + LTO_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + INVALID_ABSOLUTE_PATH_IN_IMPORTS = 1 [(metadata) = { exit_code: 1 }]; + MISSING_BITCODE_FILES = 2 [(metadata) = { 
exit_code: 1 }]; + IMPORTS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message TestAction { + enum Code { + TEST_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + NO_KEEP_GOING_TEST_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + LOCAL_TEST_PREREQ_UNMET = 2 [(metadata) = { exit_code: 1 }]; + COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 1 }]; + DUPLICATE_CPU_TAGS = 4 [(metadata) = { exit_code: 1 }]; + INVALID_CPU_TAG = 5 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Worker { + enum Code { + WORKER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MULTIPLEXER_INSTANCE_REMOVAL_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + MULTIPLEXER_DOES_NOT_EXIST = 2 [(metadata) = { exit_code: 1 }]; + NO_TOOLS = 3 [(metadata) = { exit_code: 1 }]; + NO_FLAGFILE = 4 [(metadata) = { exit_code: 1 }]; + VIRTUAL_INPUT_MATERIALIZATION_FAILURE = 5 [(metadata) = { exit_code: 1 }]; + BORROW_FAILURE = 6 [(metadata) = { exit_code: 1 }]; + PREFETCH_FAILURE = 7 [(metadata) = { exit_code: 36 }]; + PREPARE_FAILURE = 8 [(metadata) = { exit_code: 1 }]; + REQUEST_FAILURE = 9 [(metadata) = { exit_code: 1 }]; + PARSE_RESPONSE_FAILURE = 10 [(metadata) = { exit_code: 1 }]; + NO_RESPONSE = 11 [(metadata) = { exit_code: 1 }]; + FINISH_FAILURE = 12 [(metadata) = { exit_code: 1 }]; + FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Analysis { + enum Code { + ANALYSIS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + LOAD_FAILURE = 1 [(metadata) = { exit_code: 1 }]; + // TODO(b/138456686): this code should be deprecated in favor of more finely + // resolved loading-phase codes. + GENERIC_LOADING_PHASE_FAILURE = 2 [(metadata) = { exit_code: 1 }]; + NOT_ALL_TARGETS_ANALYZED = 3 [(metadata) = { exit_code: 1 }]; + CYCLE = 4 [(metadata) = { exit_code: 1 }]; + PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID = 5 [(metadata) = { exit_code: 1 }]; + ASPECT_LABEL_SYNTAX_ERROR = 6 [(metadata) = { exit_code: 1 }]; + ASPECT_PREREQ_UNMET = 7 [(metadata) = { exit_code: 1 }]; + ASPECT_NOT_FOUND = 8 [(metadata) = { exit_code: 1 }]; + ACTION_CONFLICT = 9 [(metadata) = { exit_code: 1 }]; + ARTIFACT_PREFIX_CONFLICT = 10 [(metadata) = { exit_code: 1 }]; + UNEXPECTED_ANALYSIS_EXCEPTION = 11 [(metadata) = { exit_code: 1 }]; + TARGETS_MISSING_ENVIRONMENTS = 12 [(metadata) = { exit_code: 1 }]; + INVALID_ENVIRONMENT = 13 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_MISSING_FROM_GROUPS = 14 [(metadata) = { exit_code: 1 }]; + EXEC_GROUP_MISSING = 15 [(metadata) = { exit_code: 1 }]; + INVALID_EXECUTION_PLATFORM = 16 [(metadata) = { exit_code: 1 }]; + ASPECT_CREATION_FAILED = 17 [(metadata) = { exit_code: 1 }]; + CONFIGURED_VALUE_CREATION_FAILED = 18 [(metadata) = { exit_code: 1 }]; + INCOMPATIBLE_TARGET_REQUESTED = 19 [(metadata) = { exit_code: 1 }]; + ANALYSIS_FAILURE_PROPAGATION_FAILED = 20 [(metadata) = { exit_code: 1 }]; + ANALYSIS_CACHE_DISCARDED = 21 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message PackageLoading { + enum Code { + PACKAGE_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + WORKSPACE_FILE_ERROR = 1 [(metadata) = { exit_code: 1 }]; + MAX_COMPUTATION_STEPS_EXCEEDED = 2 [(metadata) = { exit_code: 1 }]; + BUILD_FILE_MISSING = 3 [(metadata) = { exit_code: 1 }]; + REPOSITORY_MISSING = 4 [(metadata) = { exit_code: 1 }]; + PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR = 5 + [(metadata) = { exit_code: 36 }]; + TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR = 6 + [(metadata) = { exit_code: 36 }]; + INVALID_NAME = 7 
[(metadata) = { exit_code: 1 }]; + // was: PRELUDE_FILE_READ_ERROR. Replaced by IMPORT_STARLARK_FILE_ERROR + // when the prelude was changed to be loaded as a Starlark module. + reserved 8; + EVAL_GLOBS_SYMLINK_ERROR = 9 [(metadata) = { exit_code: 1 }]; + IMPORT_STARLARK_FILE_ERROR = 10 [(metadata) = { exit_code: 1 }]; + PACKAGE_MISSING = 11 [(metadata) = { exit_code: 1 }]; + TARGET_MISSING = 12 [(metadata) = { exit_code: 1 }]; + NO_SUCH_THING = 13 [(metadata) = { exit_code: 1 }]; + GLOB_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }]; + DUPLICATE_LABEL = 15 [(metadata) = { exit_code: 1 }]; + INVALID_PACKAGE_SPECIFICATION = 16 [(metadata) = { exit_code: 1 }]; + SYNTAX_ERROR = 17 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_DIFFERENT_PACKAGE = 18 [(metadata) = { exit_code: 1 }]; + DEFAULT_ENVIRONMENT_UNDECLARED = 19 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_IN_MULTIPLE_GROUPS = 20 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_DOES_NOT_EXIST = 21 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_INVALID = 22 [(metadata) = { exit_code: 1 }]; + ENVIRONMENT_NOT_IN_GROUP = 23 [(metadata) = { exit_code: 1 }]; + PACKAGE_NAME_INVALID = 24 [(metadata) = { exit_code: 1 }]; + STARLARK_EVAL_ERROR = 25 [(metadata) = { exit_code: 1 }]; + LICENSE_PARSE_FAILURE = 26 [(metadata) = { exit_code: 1 }]; + DISTRIBUTIONS_PARSE_FAILURE = 27 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 28 [(metadata) = { exit_code: 1 }]; + // Failure while evaluating or applying @_builtins injection. Since the + // builtins .bzl files are always packaged with Blaze in production, a + // failure here generally indicates a bug in Blaze. + BUILTINS_INJECTION_FAILURE = 29 [(metadata) = { exit_code: 1 }]; + SYMLINK_CYCLE_OR_INFINITE_EXPANSION = 30 [(metadata) = { exit_code: 1 }]; + OTHER_IO_EXCEPTION = 31 [(metadata) = { exit_code: 36 }]; + BAD_REPO_FILE = 32 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message Toolchain { + enum Code { + TOOLCHAIN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_PROVIDER = 1 [(metadata) = { exit_code: 1 }]; + INVALID_CONSTRAINT_VALUE = 2 [(metadata) = { exit_code: 1 }]; + INVALID_PLATFORM_VALUE = 3 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN = 4 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_EXECUTION_PLATFORM = 5 [(metadata) = { exit_code: 1 }]; + NO_MATCHING_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }]; + INVALID_TOOLCHAIN_TYPE = 7 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message StarlarkLoading { + enum Code { + STARLARK_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + CYCLE = 1 [(metadata) = { exit_code: 1 }]; + COMPILE_ERROR = 2 [(metadata) = { exit_code: 1 }]; + PARSE_ERROR = 3 [(metadata) = { exit_code: 1 }]; + EVAL_ERROR = 4 [(metadata) = { exit_code: 1 }]; + CONTAINING_PACKAGE_NOT_FOUND = 5 [(metadata) = { exit_code: 1 }]; + PACKAGE_NOT_FOUND = 6 [(metadata) = { exit_code: 1 }]; + IO_ERROR = 7 [(metadata) = { exit_code: 1 }]; + LABEL_CROSSES_PACKAGE_BOUNDARY = 8 [(metadata) = { exit_code: 1 }]; + BUILTINS_ERROR = 9 [(metadata) = { exit_code: 1 }]; + VISIBILITY_ERROR = 10 [(metadata) = { exit_code: 1 }]; + } + + Code code = 1; +} + +message ExternalDeps { + enum Code { + EXTERNAL_DEPS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MODULE_NOT_FOUND = 1 [(metadata) = { exit_code: 48 }]; + BAD_MODULE = 2 [(metadata) = { exit_code: 48 }]; + VERSION_RESOLUTION_ERROR = 3 [(metadata) = { exit_code: 48 }]; + INVALID_REGISTRY_URL = 4 [(metadata) = { exit_code: 48 }]; + ERROR_ACCESSING_REGISTRY = 5 
[(metadata) = { exit_code: 32 }]; + INVALID_EXTENSION_IMPORT = 6 [(metadata) = { exit_code: 48 }]; + BAD_LOCKFILE = 7 [(metadata) = { exit_code: 48 }]; + } + + Code code = 1; +} + +message DiffAwareness { + enum Code { + DIFF_AWARENESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + DIFF_STAT_FAILED = 1 [(metadata) = { exit_code: 36 }]; + } + + Code code = 1; +} + +message ModCommand { + enum Code { + MOD_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }]; + MISSING_ARGUMENTS = 1 [(metadata) = { exit_code: 2 }]; + TOO_MANY_ARGUMENTS = 2 [(metadata) = { exit_code: 2 }]; + INVALID_ARGUMENTS = 3 [(metadata) = { exit_code: 2 }]; + BUILDOZER_FAILED = 4 [(metadata) = { exit_code: 2 }]; + } + + Code code = 1; +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/google/protobuf/descriptor.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/google/protobuf/descriptor.proto new file mode 100644 index 0000000000..54d5cf4532 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/google/protobuf/descriptor.proto @@ -0,0 +1,909 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
+ + +syntax = "proto2"; + +package google.protobuf.descriptor; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. 
This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. 
+// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). 
+ // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. 
+ // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. 
+ // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. 
See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). 
+ // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+ // "foo.(bar.baz).qux".
+ message NamePart {
+ required string name_part = 1;
+ required bool is_extension = 2;
+ }
+ repeated NamePart name = 2;
+
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ optional string identifier_value = 3;
+ optional uint64 positive_int_value = 4;
+ optional int64 negative_int_value = 5;
+ optional double double_value = 6;
+ optional bytes string_value = 7;
+ optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ repeated Location location = 1;
+ message Location {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears.
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. 
+ repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/invocation_policy.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/invocation_policy.proto new file mode 100644 index 0000000000..56e3cc0c2d --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/invocation_policy.proto @@ -0,0 +1,207 @@ +// Copyright 2015 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package blaze.invocation_policy; + +import "src/main/protobuf/strategy_policy.proto"; + +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +// The --invocation_policy flag takes a base64-encoded binary-serialized or text +// formatted InvocationPolicy message. +message InvocationPolicy { + // Order matters. + // After expanding policies on expansion flags or flags with implicit + // requirements, only the final policy on a specific flag will be enforced + // onto the user's command line. + repeated FlagPolicy flag_policies = 1; + + optional blaze.strategy_policy.StrategyPolicy strategy_policy = 2; +} + +// A policy for controlling the value of a flag. +message FlagPolicy { + // The name of the flag to enforce this policy on. + // + // Note that this should be the full name of the flag, not the abbreviated + // name of the flag. If the user specifies the abbreviated name of a flag, + // that flag will be matched using its full name. + // + // The "no" prefix will not be parsed, so for boolean flags, use + // the flag's full name and explicitly set it to true or false. + optional string flag_name = 1; + + // If set, this flag policy is applied only if one of the given commands or a + // command that inherits from one of the given commands is being run. For + // instance, if "build" is one of the commands here, then this policy will + // apply to any command that inherits from build, such as info, coverage, or + // test. If empty, this flag policy is applied for all commands. This allows + // the policy setter to add all policies to the proto without having to + // determine which Bazel command the user is actually running. Additionally, + // Bazel allows multiple flags to be defined by the same name, and the + // specific flag definition is determined by the command. 
+ repeated string commands = 2; + + oneof operation { + SetValue set_value = 3; + UseDefault use_default = 4; + DisallowValues disallow_values = 5; + AllowValues allow_values = 6; + } +} + +message SetValue { + // Use this value for the specified flag, overriding any default or user-set + // value (unless behavior = APPEND for repeatable flags). + // + // This field is repeated for repeatable flags. It is an error to set + // multiple values for a flag that is not actually a repeatable flag. + // This requires at least 1 value, if even the empty string. + // + // If the flag allows multiple values, all of its values are replaced with the + // value or values from the policy (i.e., no diffing or merging is performed), + // unless behavior = APPEND (see below). + // + // Note that some flags are tricky. For example, some flags look like boolean + // flags, but are actually Void expansion flags that expand into other flags. + // The Bazel flag parser will accept "--void_flag=false", but because + // the flag is Void, the "=false" is ignored. It can get even trickier, like + // "--novoid_flag" which is also an expansion flag with the type Void whose + // name is explicitly "novoid_flag" and which expands into other flags that + // are the opposite of "--void_flag". For expansion flags, it's best to + // explicitly override the flags they expand into. + // + // Other flags may be differently tricky: A flag could have a converter that + // converts some string to a list of values, but that flag may not itself have + // allowMultiple set to true. + // + // An example is "--test_tag_filters": this flag sets its converter to + // CommaSeparatedOptionListConverter, but does not set allowMultiple to true. + // So "--test_tag_filters=foo,bar" results in ["foo", "bar"], however + // "--test_tag_filters=foo --test_tag_filters=bar" results in just ["bar"] + // since the 2nd value overrides the 1st. + // + // Similarly, "--test_tag_filters=foo,bar --test_tag_filters=baz,qux" results + // in ["baz", "qux"]. For flags like these, the policy should specify + // "foo,bar" instead of separately specifying "foo" and "bar" so that the + // converter is appropriately invoked. + // + // Note that the opposite is not necessarily + // true: for a flag that specifies allowMultiple=true, "--flag=foo,bar" + // may fail to parse or result in an unexpected value. + repeated string flag_value = 1; + + // Obsolete overridable and append fields. + reserved 2, 3; + + enum Behavior { + UNDEFINED = 0; + // Change the flag value but allow it to be overridden by explicit settings + // from command line/config expansion/rc files. + // Matching old flag values: append = false, overridable = true. + ALLOW_OVERRIDES = 1; + // Append a new value for a repeatable flag, leave old values and allow + // further overrides. + // Matching old flag values: append = true, overridable = false. + APPEND = 2; + // Set a final value of the flag. Any overrides provided by the user for + // this flag will be ignored. + // Matching old flag values: append = false, overridable = false. + FINAL_VALUE_IGNORE_OVERRIDES = 3; + } + + // Defines how invocation policy should interact with user settings for the + // same flag. + optional Behavior behavior = 4; +} + +message UseDefault { + // Use the default value of the flag, as defined by Bazel (or equivalently, do + // not allow the user to set this flag). 
+ // + // Note on implementation: UseDefault sets the default by clearing the flag, + // so that when the value is requested and no flag is found, the flag parser + // returns the default. This is mostly relevant for expansion flags: it will + // erase user values in *all* flags that the expansion flag expands to. Only + // use this on expansion flags if this is acceptable behavior. Since the last + // policy wins, later policies on this same flag will still remove the + // expanded UseDefault, so there is a way around, but it's really best not to + // use this on expansion flags at all. +} + +message DisallowValues { + // Obsolete new_default_value field. + reserved 2; + + // It is an error for the user to use any of these values (that is, the Bazel + // command will fail), unless new_value or use_default is set. + // + // For repeatable flags, if any one of the values in the flag matches a value + // in the list of disallowed values, an error is thrown. + // + // Care must be taken for flags with complicated converters. For example, + // it's possible for a repeated flag to be of type List>, so that + // "--foo=a,b --foo=c,d" results in foo=[["a","b"], ["c", "d"]]. In this case, + // it is not possible to disallow just "b", nor will ["b", "a"] match, nor + // will ["b", "c"] (but ["a", "b"] will still match). + repeated string disallowed_values = 1; + + oneof replacement_value { + // If set and if the value of the flag is disallowed (including the default + // value of the flag if the user doesn't specify a value), use this value as + // the value of the flag instead of raising an error. This does not apply to + // repeatable flags and is ignored if the flag is a repeatable flag. + string new_value = 3; + + // If set and if the value of the flag is disallowed, use the default value + // of the flag instead of raising an error. Unlike new_value, this works for + // repeatable flags, but note that the default value for repeatable flags is + // always empty. + // + // Note that it is an error to disallow the default value of the flag and + // to set use_default, unless the flag is a repeatable flag where the + // default value is always the empty list. + UseDefault use_default = 4; + } +} + +message AllowValues { + // Obsolete new_default_value field. + reserved 2; + + // It is an error for the user to use any value not in this list, unless + // new_value or use_default is set. + repeated string allowed_values = 1; + + oneof replacement_value { + // If set and if the value of the flag is disallowed (including the default + // value of the flag if the user doesn't specify a value), use this value as + // the value of the flag instead of raising an error. This does not apply to + // repeatable flags and is ignored if the flag is a repeatable flag. + string new_value = 3; + + // If set and if the value of the flag is disallowed, use the default value + // of the flag instead of raising an error. Unlike new_value, this works for + // repeatable flags, but note that the default value for repeatable flags is + // always empty. + // + // Note that it is an error to disallow the default value of the flag and + // to set use_default, unless the flag is a repeatable flag where the + // default value is always the empty list. 
+ UseDefault use_default = 4; + } +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/option_filters.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/option_filters.proto new file mode 100644 index 0000000000..629e006888 --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/option_filters.proto @@ -0,0 +1,61 @@ +// Copyright 2017 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto3"; + +package options; + +// option java_api_version = 2; +option java_package = "com.google.devtools.common.options.proto"; + +// IMPORTANT NOTE: These two enums must be kept in sync with their Java +// equivalents in src/main/java/com/google/devtools/common/options. +// Changing this proto has specific compatibility requirements, please see the +// Java documentation for details. + +// Docs in java enum. +enum OptionEffectTag { + // This option's effect or intent is unknown. + UNKNOWN = 0; + + // This flag has literally no effect. + NO_OP = 1; + + LOSES_INCREMENTAL_STATE = 2; + CHANGES_INPUTS = 3; + AFFECTS_OUTPUTS = 4; + BUILD_FILE_SEMANTICS = 5; + BAZEL_INTERNAL_CONFIGURATION = 6; + LOADING_AND_ANALYSIS = 7; + EXECUTION = 8; + HOST_MACHINE_RESOURCE_OPTIMIZATIONS = 9; + EAGERNESS_TO_EXIT = 10; + BAZEL_MONITORING = 11; + TERMINAL_OUTPUT = 12; + ACTION_COMMAND_LINES = 13; + TEST_RUNNER = 14; +} + +// Docs in java enum. +enum OptionMetadataTag { + EXPERIMENTAL = 0; + INCOMPATIBLE_CHANGE = 1; + DEPRECATED = 2; + HIDDEN = 3; + INTERNAL = 4; + reserved "TRIGGERED_BY_ALL_INCOMPATIBLE_CHANGES"; + reserved 5; + reserved "EXPLICIT_IN_OUTPUT_PATH"; + reserved 6; + IMMUTABLE = 7; +} diff --git a/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/strategy_policy.proto b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/strategy_policy.proto new file mode 100644 index 0000000000..0f58c9b81f --- /dev/null +++ b/nativelink-proto/google/devtools/build/v1/src/main/protobuf/src/main/protobuf/strategy_policy.proto @@ -0,0 +1,67 @@ +// Copyright 2022 The Bazel Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto2"; + +package blaze.strategy_policy; + +option java_multiple_files = true; +// option java_api_version = 2; +option java_package = "com.google.devtools.build.lib.runtime.proto"; + +// Provides control over what strategies (local, remote, etc) may be used. +// +// An empty policies (e.g. unset) implies no enforcement, anything is allowed. +// +// Policies are enforced against both user-provided values (flags) and +// application-internal defaults. The latter is useful for guarding against +// unexpectedly hard-coded defaults. +// +// Sample usage to allow everything to execute remotely, while only allowing +// genrules to execute locally: +// +// strategy_policy { +// mnemonic_policy { +// default_allowlist: ["remote"] +// strategy_allowlist: [ +// { mnemonic: "Genrule" strategy: ["local"] } +// ] +// } +// } +message StrategyPolicy { + // Controls per-mnemonic policies for regular spawn/action execution. Relevant + // command-line flags this controls include --strategy and --genrule_strategy. + optional MnemonicPolicy mnemonic_policy = 1; + + // Controls per-mnemonic policies for the remote execution leg of dynamic + // execution. Relevant flag is --dynamic_remote_strategy. + optional MnemonicPolicy dynamic_remote_policy = 2; + + // Controls per-mnemonic policies for the local execution leg of dynamic + // execution. Relevant flag is --dynamic_local_strategy. + optional MnemonicPolicy dynamic_local_policy = 3; +} + +message MnemonicPolicy { + // Default allowed strategies for mnemonics not present in `strategy` list. + repeated string default_allowlist = 1; + + repeated StrategiesForMnemonic strategy_allowlist = 2; +} + +// Per-mnemonic allowlist settings. +message StrategiesForMnemonic { + optional string mnemonic = 1; + repeated string strategy = 2; +} diff --git a/tools/pre-commit-hooks.nix b/tools/pre-commit-hooks.nix index 144de53364..fcdc637f3d 100644 --- a/tools/pre-commit-hooks.nix +++ b/tools/pre-commit-hooks.nix @@ -65,6 +65,7 @@ in { # Bun binary lockfile "web/app/bun.lockb" + "web/bridge/bun.lockb" ]; enable = true; types = ["binary"]; diff --git a/web/app/src/pages/app/index.astro b/web/app/src/pages/app/index.astro new file mode 100644 index 0000000000..1e349bd93c --- /dev/null +++ b/web/app/src/pages/app/index.astro @@ -0,0 +1,50 @@ +--- +import Layout from "../../layouts/Layout.astro"; + +--- + + +
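The app page only needs to read the bridge's WebSocket stream and render it. A minimal browser-side sketch in TypeScript, assuming the bridge server from `web/bridge/src/websocket.ts` below is listening on `ws://localhost:8080` and that the page exposes a `<pre id="log">` element — the element id and handling are illustrative only, not the actual `index.astro` markup:

```typescript
// Connect to the nativelink-bridge WebSocket server (see web/bridge/src/websocket.ts).
const socket = new WebSocket("ws://localhost:8080");
socket.binaryType = "arraybuffer";

// Assumption: the page renders progress output into a <pre id="log"> element.
const log = document.querySelector<HTMLPreElement>("#log");
const decoder = new TextDecoder();

socket.addEventListener("message", (event: MessageEvent) => {
  // The bridge broadcasts progress.stderr as binary frames; the initial
  // "Hello Web Client" greeting arrives as a plain string.
  const text =
    typeof event.data === "string" ? event.data : decoder.decode(event.data);
  if (log) {
    log.textContent += text;
  }
});

socket.addEventListener("close", () => {
  if (log) {
    log.textContent += "\n[bridge connection closed]\n";
  }
});
```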
diff --git a/web/bridge/.gitignore b/web/bridge/.gitignore new file mode 100644 index 0000000000..9b1ee42e84 --- /dev/null +++ b/web/bridge/.gitignore @@ -0,0 +1,175 @@ +# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore + +# Logs + +logs +_.log +npm-debug.log_ +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Caches + +.cache + +# Diagnostic reports (https://nodejs.org/api/report.html) + +report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json + +# Runtime data + +pids +_.pid +_.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover + +lib-cov + +# Coverage directory used by tools like istanbul + +coverage +*.lcov + +# nyc test coverage + +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) + +.grunt + +# Bower dependency directory (https://bower.io/) + +bower_components + +# node-waf configuration + +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) + +build/Release + +# Dependency directories + +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) + +web_modules/ + +# TypeScript cache + +*.tsbuildinfo + +# Optional npm cache directory + +.npm + +# Optional eslint cache + +.eslintcache + +# Optional stylelint cache + +.stylelintcache + +# Microbundle cache + +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history + +.node_repl_history + +# Output of 'npm pack' + +*.tgz + +# Yarn Integrity file + +.yarn-integrity + +# dotenv environment variable files + +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) + +.parcel-cache + +# Next.js build output + +.next +out + +# Nuxt.js build / generate output + +.nuxt +dist + +# Gatsby files + +# Comment in the public line in if your project uses Gatsby and not Next.js + +# https://nextjs.org/blog/next-9-1#public-directory-support + +# public + +# vuepress build output + +.vuepress/dist + +# vuepress v2.x temp and cache directory + +.temp + +# Docusaurus cache and generated files + +.docusaurus + +# Serverless directories + +.serverless/ + +# FuseBox cache + +.fusebox/ + +# DynamoDB Local files + +.dynamodb/ + +# TernJS port file + +.tern-port + +# Stores VSCode versions used for testing VSCode extensions + +.vscode-test + +# yarn v2 + +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* + +# IntelliJ based IDEs +.idea + +# Finder (MacOS) folder config +.DS_Store diff --git a/web/bridge/README.md b/web/bridge/README.md new file mode 100644 index 0000000000..e75a4d2e7e --- /dev/null +++ b/web/bridge/README.md @@ -0,0 +1,74 @@ +# NativeLink Bridge + +Make sure you are running an instance of Redis or DragonflyDB in your local network. + +For DragonflyDB inside docker run: + +```bash +docker run -d --name some-dragonfly -p 6379:6379 --ulimit memlock=-1 docker.dragonflydb.io/dragonflydb/dragonfly +``` + +For Redis inside docker run: + +```bash +docker run -d --name some-redis -p 6379:6379 redis +``` + +## You need 4 Shells to be open + +### 1. Shell (NativeLink) + +Start an instance of NativeLink and connect it with the basic_cas_test_conf.json to the redis/dragonflydb: + +```bash +cd ../.. && ./result/bin/nativelink ./nativelink-config/examples/basic_cas_test_conf.json +``` + +## 2. 
Shell (NativeLink Web Bridge) + +Install dependencies of the bridges: + +```bash +bun i +``` + +Run the Bridge: + +```bash +bun run index.ts +``` + +## 3. Shell (NativeLink Web App) + +Inside the web/app directory run: + +```bash +bun i & bun dev +``` + +Now you can open http://localhost:4321/app. + + +## 4. Shell (Bazel) + +Now you can run your Bazel build with NativeLink and see it in real-time going into the web app + +Include this in your .bazelrc +```bash +build --remote_instance_name=main +build --remote_cache=grpc://127.0.0.1:50051 +build --remote_executor=grpc://127.0.0.1:50051 +build --bes_backend=grpc://127.0.0.1:50061 +build --bes_results_url=http://127.0.0.1:50061/ +build --bes_upload_mode=fully_async +build --build_event_publish_all_actions=true +``` + +```bash +bazel build some-target +``` + + + + +This project was created using `bun init` in bun v1.1.25. [Bun](https://bun.sh) is a fast all-in-one JavaScript runtime. diff --git a/web/bridge/bun.lockb b/web/bridge/bun.lockb new file mode 100755 index 0000000000000000000000000000000000000000..de06657f7fea92ac9a479ab4d546e26317af292f GIT binary patch literal 11334 zcmeHNc|6p6_aB6}d`URf@6ePEp@5qmiF7O zg_L$JM3lCxg;FBAEtlUpGv5#2?sIR(?>>J#uh-M@`a0jw@;>K$&S(A3`3%uBi4qA- z{CRv6Ese4=YR%5E{e_fX(3k>Em7(#9!PXwiF;pxR- z#Loua6L_3oSQt+vW|+t^7(F082)G*Xc&a=KxH6=9z_ENH#WjH|K{`yp4~AF9tuE4f zE=*~?d`P35gB0IR@wLD;Ab%lnb>N{C?+3g$q^AJa0`3GH>#>2Oy`W2?&!anop$O?z zilc6{V_3JLD+MQ5%W8M@FnTxFL!50^89T(+{M_l3JlfM4n+U!daCQ{Fo6Oo741%`D0$!{s{g^Ln9};{hyx7w02Xka1g3qDhNuDeM zNZl%Uu?PKVH;zGBiQw-;$C(1YD*!PM`%xAmcx6yJ0`Rzw!Mes!kp&EO2!0aaohkkO zAs_9Kg$RB<;IaMacUgUp@(I59Z{n4}V7h)Hx1%}!nN%& zb$K<;dk*v)-}AQO#B;MuKYwto9`^IvKHI8Zw0cJBHZ?T}uM8-TqRA!m0L^BKraHVY z=Kk|MTeMOq#CXs|m)AR*a|Gq~AGZvw+&hJL|Fp;MalVh4<2Y|Mm2XTtY2ZHV+4bcr z>~*6u{2dC+_kE(trSF4Go(_9av%*^s6*cGI5>`DuqWJdVm%IHUR-8$!+V%GQ(A?^$ zsgI1`alWKyTs<}Mv#-~Rn4_1|r?bm%HLkYV?DE2qCKtyNA?Bm*uB*y~xoHI@#eI#( zoScxheoW8$wGsMy0eYvxb6l>q{yOzv6*cQ_Klss^&*lmrrpO;yw!`?$aqiS}E9Q=6 zy{5^|8|N2MZ>^VWG~Egc)+m3hzk}I$ZE1DI`@*iVUgw7QG@CiSA^n+0 z`sSx!CNy|#SP}E-jMkee7dW-$+m^kd$;G)%h&kr!`#uBa_|zOuo^N216gsm=v55OA z>WPnX{~wCg>W*uvls|u<6gTv9dFYipcU9*081T!On(QTueY?JSq1jrcixZ2C2UvF6 zhZ+ zEB4ckQ_Kr2aIF`s_fy$IlS^NJnKk1#Ew=O?uX*K2w(GC0Gwa{)9yVc9g7I>}<%9FK zKe+ZPdEcYLehDSsE;?Ho4aNNCI^UZ6)6zKuhTqKOyvk6;vn{cgezs+5j@F1YtH`#R zopglzGUO-Sf#&`BqH3RuHTzct8s{$Q>pnuA+utpI&-6nx@56t?o;mN9-GqV#t=T!76(mK-bhF;D3 zAg?_6S@NH(oiEI2$gQAMG7`VoCB* zH}lgag+ax$RW)jV^_>5*-sZ%}3Yxv}4c$h}Lstr4l=9||j4FyXa+WVz=Xq-Kq0*7o z$=ndXHJX_Z#5w1!Zi$vS=88k~CYfH8ODz;RrflX}baC9+H73&AD3d0a>^~@+X)tk( z-HRRKQA&FzMW+2U$SYpcE5y9+qTTh8Pnx2|xz;})(tAE}!-N|{P0lReJTtEAY?Gvg zhu7vplBD`8>Vdx@zdp3;wC){X@7KO)l=&gqW$N+ZGj8?SJa8d2up-iAC8% z6V3fsySL1Z3BGsswBL!1y7fz6WiFX)sNZ~a)3lm7IfrY!cC#yeO=r8r#pKWL(nOOB zzge^qvu9aL>a=IR*A~T8FLHQ)xkzV>xI%YF-;+n4eLC(jr_8I*h=yRd`hxWE6L;ks zMMJ8#m%iptE{%R!v`wP>PMTc!&83Z)DKC{Y z)Y)wRgXbKI6Erp*I~+YR{80U{Xul0+^RyQKGAUXo>!94Fbc>TF*Y;jjXquGjd6pY! 
z6f;0GuIqE53VR1lZl5-(Fn&|Yw{F$^Qr4{a{>9ysk*Cx()5N2iF3#$?bWh-gYXkIl zHM*~TynR^V;1ApSxA>IIH+WQ{{C28O=ApG~_>)~8d;U(7+m|jkK+ikbea-WN_r-cM z6{_C#U^m7FCH!8cFzMVVIlJvTK~<$wT?)cjPp8M)E>rb><6RYVq|7gV;=!$ZH1r*P z&KS_-!f#G(#FWd>W~c~$eDwBleXY?D_2;t}tk@E6ZzI34)mo$VqUpU!73SJ3w>d6O zW0eAT-bp%h{c;zToT&z54%saoaR1x63NOH53s zXrxq4c~{CztcV&JZFnG>@3kc2wK(^4Mw-yxyyqM(wNE(1H}2+yQiZAC}x+|NEF@PBq0dI7Wl*b3(fX%B869e3<7{oU-on z{q^1-ysG39edb2z_w3VqUz&P&)7diL83j?JhbN65T56U`ldDaa%c(rpe0~|jRkdz+ zfUC0Si1fF%&Gjz4$u%XnZz@!2JU%jZ<*PaG(yyxDi7UEfI{60kxT5aoMcd{G2c%b& zcTdFMPQKH34|K)@_x%=Hjgh7 zLxhq|;-&1VLN3=)mt(3c;?3s@f{b-7bp673!F)kb`xj|TtCY|>Ja-(VMCwvM&MDbP znR6Tg`4=*9oyXs2@V68_c<@^R&-Hi)$MZ6tZSfq6XGT2V;aLpNO{fc1;U0s(Yv6AX z_tG`Ml2yQS$R2MYqkYqNOYS-f1$*J zZH*-p0R#5OlX$MYoEe*g1&rD7fc5btLX0I=k`f+;A+caAfj`KNf(JYwPol+GV#>CL z+<16U8i^;j>sbpW;I()Xk(NsPGYh~6C=QB96rMz>r5FxKB5`^W$Cj5fXWL4wCXsv+ z>6VuR*ZgTq+x7g>2Y;`G#PCUs8+rj92WFEfK8bR-mpH+o#5g~RgF^{)58HylAU6R> zq@2QlcgURq5>sz4A-4)h)Ll|yiWZQ22P7`vj@b?tsDT9}a!-{2hTL5svHf;TK7}FC z|Mn7c4}#nike73GFf+%oNN!4yn*@NNMm)JQLGBdD%PqEKbF9D#f8kLWihtc6azldL zAfUz$1k3;3w#6J%wi$5ki4j;2#w%IDQ8Jm8MZih zzLPZHKTIG9r``j?cwD}iN(2l2B0{M5c_IOydKGc~gaIU9%nRqTpqV`Gw+x_=w8~%& zEycn)XNop`qZO18sfmD>z5o zurtU8lMD`QISvwusb;bPCBqGX5-yXW?PDBZ?Fp7-BSoc7?U^QiB1nYuL|l5Nzn_R} zWkJsg5Cn3e)96{;Xc+q3aGn?j5j{VMD~91tQxOy<@DtNBc>KsHp71M;5q{#YFgOV5 zkSKAG4K<7TV9GbfC$7QJZe%Q2V%g>$xmmh4pj3b%V@X#`GYdpU@dJ4QJeH)=H?-2L zff#xct+0ZXwu8|{a~L)jg#eR`lnzHG4U3KgEV9mHp%V}bOoHujGST!>eF|{ut<%~i z!vydW4rtiZJK;0Qs6#Z4zE10In*|`D?Y+}Jpw9>pL{E0wB5J~c80w8BIWu*_BhpC> z0;DgpwWe*OL>ydV#f`=Wm13f!Nzhh@0!!*k^;)3}Y68c6bL*2@5QF<~jEE&UqDaPp i^t2#7A$(&ZO5l=B9|W`~U=muwcLW$AJv;pO{r^9z0pcnE literal 0 HcmV?d00001 diff --git a/web/bridge/index.ts b/web/bridge/index.ts new file mode 100644 index 0000000000..c8e94336a0 --- /dev/null +++ b/web/bridge/index.ts @@ -0,0 +1,33 @@ +import { initializeRedisClients } from './src/redis'; +import { initializeProtobuf } from './src/protobuf'; +import { handleEvent } from './src/eventHandler'; + +async function main() { +// console.log("Hello via Bun!"); + + // Initialize Redis clients + const { redisClient, commandClient } = await initializeRedisClients(); + + // Load Protobuf definitions + const rootPath = '../../nativelink-proto'; + const protoFiles = [ + `${rootPath}/google/devtools/build/v1/publish_build_event.proto`, + `${rootPath}/google/devtools/build/v1/build_event_stream.proto`, + ]; + + const protobufTypes = await initializeProtobuf(rootPath, protoFiles); + + // Subscribe to the build_event channel + await redisClient.subscribe('build_event', async (message: string) => { + await handleEvent(message, commandClient, protobufTypes); + }); + + // Clean up on exit + process.on('SIGINT', async () => { + await redisClient.disconnect(); + await commandClient.disconnect(); + process.exit(); + }); +} + +main().catch(err => console.error(err)); diff --git a/web/bridge/package.json b/web/bridge/package.json new file mode 100644 index 0000000000..83e075c7e5 --- /dev/null +++ b/web/bridge/package.json @@ -0,0 +1,15 @@ +{ + "name": "bridge", + "module": "index.ts", + "type": "module", + "devDependencies": { + "@types/bun": "^1.1.8" + }, + "peerDependencies": { + "typescript": "^5.0.0" + }, + "dependencies": { + "protobufjs": "^7.4.0", + "redis": "^4.7.0" + } +} diff --git a/web/bridge/src/eventHandler.ts b/web/bridge/src/eventHandler.ts new file mode 100644 index 0000000000..993799a8ea --- /dev/null +++ b/web/bridge/src/eventHandler.ts @@ -0,0 +1,111 @@ +import type protobuf from 'protobufjs'; +import { commandOptions } from 'redis'; 
+import { constructRedisKey, parseMessage } from './utils';
+import { broadcastProgress } from './websocket';
+
+
+export async function handleEvent(message: string, commandClient: any, types: { PublishBuildToolEventStreamRequest: protobuf.Type, PublishLifecycleEventRequest: protobuf.Type }) {
+// console.log(`Received message from build_event channel: ${message}`);
+
+  const parsedMessage = parseMessage(message);
+// console.log('Parsed Message:', parsedMessage);
+
+  const redisKey = constructRedisKey(parsedMessage);
+// console.log('Constructed Redis Key:', redisKey);
+
+  switch (parsedMessage.eventType) {
+    case 'LifecycleEvent':
+      // console.log(`Processing ${parsedMessage.eventType} with key ${redisKey}`);
+      await fetchAndDecodeBuildData(redisKey, commandClient, types.PublishLifecycleEventRequest);
+      break;
+    case 'BuildToolEventStream':
+      // console.log(`Processing ${parsedMessage.eventType} with key ${redisKey}`);
+      await fetchAndDecodeBuildData(redisKey, commandClient, types.PublishBuildToolEventStreamRequest);
+      break;
+    default:
+      console.log('Unknown event type:', parsedMessage.eventType);
+  }
+}
+
+async function fetchAndDecodeBuildData(redisKey: string, commandClient: any, messageType: protobuf.Type) {
+  try {
+    const buildData = await commandClient.get(commandOptions({ returnBuffers: true }), redisKey);
+    if (buildData) {
+      // console.log(`Fetched build data for key ${redisKey}`);
+
+      const buffer = Buffer.from(buildData);
+      const decodedMessage = messageType.decode(buffer);
+
+      // Decode the `bazelEvent` if it exists
+      if (decodedMessage?.orderedBuildEvent?.event?.bazelEvent) {
+        const decodedBazelEvent = decodeBazelEvent(decodedMessage.orderedBuildEvent.event.bazelEvent, messageType.root);
+        // console.log("Decoded Bazel Event:", decodedBazelEvent);
+      } else {
+        // console.log("No Bazel Event found.");
+      }
+
+      // console.log("Decoded String:", decodedMessage.toJSON());
+    } else {
+      // console.log(`No build data found for key ${redisKey}`);
+    }
+  } catch (err) {
+    // console.error(`Error fetching build data for key ${redisKey}:`, err);
+  }
+}
+
+// biome-ignore lint/suspicious/noExplicitAny:
+function decodeBazelEvent(bazelEvent: any, root: protobuf.Root): any {
+  if (!bazelEvent || !bazelEvent.value) return null;
+
+  // Base64-decode the payload
+  const decodedBinaryData = Buffer.from(bazelEvent.value, 'base64');
+
+  // Inspect the `type_url` to determine the message type
+  const messageType = root.lookupType(bazelEvent.type_url.split('/').pop());
+
+  // Decode the message
+  const decodedMessage = messageType.decode(decodedBinaryData);
+
+  // Convert it into a readable JSON object
+  const decodedObject = messageType.toObject(decodedMessage, {
+    longs: String,
+    enums: String,
+    bytes: String,
+  });
+
+  // Process progress information
+  if (decodedObject.progress) {
+    // console.log("Processing progress information...");
+    processProgress(decodedObject.progress);
+  }
+
+  return decodedObject;
+}
+
+// biome-ignore lint/suspicious/noExplicitAny:
+function processProgress(progress: any) {
+// console.log(progress.stderr)
+  if (progress.stderr) {
+    // const cleanStderr = stripAnsi(progress.stderr);
+    console.log(progress.stderr);
+    broadcastProgress(progress.stderr)
+  }
+
+  if (progress.opaqueCount === 1) {
+    // console.log(`Progress Opaque Count: ${progress.opaqueCount}`);
+    // console.log(progress.stderr);
+  }
+
+  if (progress.children) {
+    // biome-ignore lint/suspicious/noExplicitAny:
+    progress.children.forEach((child: any, index: number) => {
+      
// console.log(`Child ${index + 1}:`); + if (child.progress && child.progress.opaqueCount ===2 ) { + // console.log(` Child Progress Opaque Count: ${child.progress.opaqueCount}`); + } + // if (child.configuration && child.configuration.id) { + // // console.log(` Child Configuration ID: ${child.configuration.id}`); + // } + }); + } +} diff --git a/web/bridge/src/protobuf.ts b/web/bridge/src/protobuf.ts new file mode 100644 index 0000000000..7ca21bf751 --- /dev/null +++ b/web/bridge/src/protobuf.ts @@ -0,0 +1,19 @@ +import protobuf from 'protobufjs'; + +export async function initializeProtobuf(rootPath: string, protoFiles: string[]) { + const root = new protobuf.Root(); + + root.resolvePath = (origin: string, target: string) => { + if (target.startsWith("google/devtools/build/v1/")) { + return `${rootPath}/${target}`; + } + return protobuf.util.path.resolve(origin, target); + }; + + await root.load(protoFiles); + + const PublishBuildToolEventStreamRequest = root.lookupType("google.devtools.build.v1.PublishBuildToolEventStreamRequest"); + const PublishLifecycleEventRequest = root.lookupType("google.devtools.build.v1.PublishLifecycleEventRequest"); + + return { PublishBuildToolEventStreamRequest, PublishLifecycleEventRequest }; +} diff --git a/web/bridge/src/redis.ts b/web/bridge/src/redis.ts new file mode 100644 index 0000000000..cf36842579 --- /dev/null +++ b/web/bridge/src/redis.ts @@ -0,0 +1,23 @@ +import { createClient } from 'redis'; + +export async function initializeRedisClients() { + try { + const redisClient = createClient(); + const commandClient = redisClient.duplicate(); + + redisClient.on('error', (err) => { + console.error('Redis Client Error:', err); + throw new Error('Failed to connect to Redis.'); + }); + + await redisClient.connect(); + await commandClient.connect(); + + console.log('Redis clients successfully connected.'); + + return { redisClient, commandClient }; + } catch (error) { + console.error('Error during Redis client initialization:', error); + throw new Error('Unable to initialize Redis clients. 
Please check your connection.'); + } +} diff --git a/web/bridge/src/utils.ts b/web/bridge/src/utils.ts new file mode 100644 index 0000000000..9b8a10e9cc --- /dev/null +++ b/web/bridge/src/utils.ts @@ -0,0 +1,20 @@ +export function parseMessage(message: string) { + const parts = message.split('-'); + + const eventType = parts[0].replace('nativelink.', ''); + const eventID = parts.slice(1, 6).join('-'); + const subEventID = parts.slice(6, 11).join('-'); + const sequenceNumber = parts[11]; + + return { + eventType, + eventID, + subEventID, + sequenceNumber + }; +} + +// biome-ignore lint/suspicious/noExplicitAny: +export function constructRedisKey(parsedMessage: any) { + return `nativelink.${parsedMessage.eventType}-${parsedMessage.eventID}-${parsedMessage.subEventID}-${parsedMessage.sequenceNumber}`; +} diff --git a/web/bridge/src/websocket.ts b/web/bridge/src/websocket.ts new file mode 100644 index 0000000000..b932b900f9 --- /dev/null +++ b/web/bridge/src/websocket.ts @@ -0,0 +1,44 @@ +const clients = new Set(); + +Bun.serve({ + port: 8080, + fetch(req, server) { + // Upgrade the request to a WebSocket + if (server.upgrade(req)) { + return; // Do not return a Response + } + return new Response("Upgrade failed", { status: 500 }); + }, + websocket: { + open(ws) { + console.log('New client connected'); + clients.add(ws); + ws.send("Hello Web Client") + }, + message(ws, message) { + console.log('Received message from web client:', message); + }, + close(ws) { + console.log('Web Client disconnected'); + clients.delete(ws); + }, + drain(ws) { + console.log('Ready to receive more data'); + }, + }, +}); + +export function broadcastProgress(progress: string) { + // Convert the string to a Uint8Array + // const buffer = new TextEncoder().encode(progress); + + const buffer = Buffer.from(progress) + + for (const ws of clients) { + ws.send(new Uint8Array(buffer)); // Send the ArrayBufferView + } +} + + + +console.log('WebSocket server is running on ws://localhost:8080'); diff --git a/web/bridge/tsconfig.json b/web/bridge/tsconfig.json new file mode 100644 index 0000000000..238655f2ce --- /dev/null +++ b/web/bridge/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + // Enable latest features + "lib": ["ESNext", "DOM"], + "target": "ESNext", + "module": "ESNext", + "moduleDetection": "force", + "jsx": "react-jsx", + "allowJs": true, + + // Bundler mode + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + + // Best practices + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + + // Some stricter flags (disabled by default) + "noUnusedLocals": false, + "noUnusedParameters": false, + "noPropertyAccessFromIndexSignature": false + } +}
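For reference, the key handling in `web/bridge/src/utils.ts` round-trips cleanly: `parseMessage` splits the pub/sub notification back into its parts, and `constructRedisKey` rebuilds the exact key under which the serialized event payload is stored. A small usage sketch, assuming a channel message of the shape `nativelink.<EventType>-<UUID>-<UUID>-<sequence>` — the UUIDs below are made up for illustration:

```typescript
import { constructRedisKey, parseMessage } from "./src/utils";

// Hypothetical message as published on the "build_event" channel:
// "nativelink.<EventType>-<event UUID>-<sub-event UUID>-<sequence number>"
const message =
  "nativelink.BuildToolEventStream-" +
  "123e4567-e89b-42d3-a456-426614174000-" +
  "9f8b7c6d-1a2b-4c3d-8e9f-0a1b2c3d4e5f-1";

const parsed = parseMessage(message);
// parsed.eventType      === "BuildToolEventStream"
// parsed.eventID        === "123e4567-e89b-42d3-a456-426614174000"
// parsed.subEventID     === "9f8b7c6d-1a2b-4c3d-8e9f-0a1b2c3d4e5f"
// parsed.sequenceNumber === "1"

// constructRedisKey() rebuilds the key that fetchAndDecodeBuildData()
// reads the serialized protobuf payload from.
console.log(constructRedisKey(parsed) === message); // true
```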