diff --git a/kafka/api.html b/kafka/api.html
index 0950780a5..474de75c7 100644
--- a/kafka/api.html
+++ b/kafka/api.html
@@ -632,6 +632,11 @@

func (c *Consumer) Assignment() (partitions []TopicPartition, err error)

+func (c *Consumer) AssignmentLost() bool
func (c *Consumer) Close() (err error)

@@ -672,11 +677,26 @@

func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

+func (c *Consumer) GetRebalanceProtocol() string
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
+func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)

+func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
func (c *Consumer) Logs() chan LogEvent

@@ -1315,7 +1335,7 @@

LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client

-const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.5.2.tgz"
+const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.6.1.tgz"
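As an illustration only (not part of the diff), the constant can be printed together with kafka.LibraryVersion() to confirm at runtime which librdkafka build the client was linked against; a minimal sketch:

    package main

    import (
    	"fmt"

    	"github.com/confluentinc/confluent-kafka-go/kafka"
    )

    func main() {
    	// LibrdkafkaLinkInfo describes how librdkafka was linked into the client,
    	// e.g. "static glibc_linux from librdkafka-static-bundle-v1.6.1.tgz".
    	fmt.Println("librdkafka link info:", kafka.LibrdkafkaLinkInfo)

    	// LibraryVersion reports the librdkafka version as an int and a string.
    	ver, verstr := kafka.LibraryVersion()
    	fmt.Printf("librdkafka version: %#x (%s)\n", ver, verstr)
    }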

OffsetBeginning represents the earliest offset (logical)

@@ -1339,7 +1359,7 @@

const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

func LibraryVersion
    WriteErrorCodes

type AdminClient
    NewAdminClient
    NewAdminClientFromConsumer
    NewAdminClientFromProducer
    AlterConfigs
    Close
    ClusterID
    ControllerID
    CreatePartitions
    CreateTopics
    DeleteTopics
    DescribeConfigs
    GetMetadata
    SetOAuthBearerToken
    SetOAuthBearerTokenFailure
    String

type AdminOption
type AdminOptionOperationTimeout
    SetAdminOperationTimeout
type AdminOptionRequestTimeout
    SetAdminRequestTimeout
type AdminOptionValidateOnly
    SetAdminValidateOnly
type AlterConfigsAdminOption

type AlterOperation int
    func (AlterOperation) String

type AssignedPartitions
    String

func (e AssignedPartitions) String() string

type BrokerMetadata

type ConfigEntry
    StringMapToConfigEntries
    String
type ConfigEntryResult
    String

type ConfigMap map[string]ConfigValue
    func (ConfigMap) Get
    Set
    SetKey

type ConfigResource
    String
type ConfigResourceResult
    String

type ConfigSource int
    func (ConfigSource) String

type ConfigValue

type Consumer
    NewConsumer
    Assign

func (c *Consumer) Assign(partitions []TopicPartition) (err error)

 Assign an atomic set of partitions to consume.
-This replaces the current assignment.
+
+The .Offset field of each TopicPartition must either be set to an absolute
+starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
+but should typically be set to `kafka.OffsetStored` to have the consumer
+use the committed offset as a start position, with a fallback to
+`auto.offset.reset` if there is no committed offset.
+
+This replaces the current assignment.
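For illustration only (not part of the generated docs), a minimal sketch of a manual assignment that starts from committed offsets where available; the broker address, group id and topic name are placeholders:

    package main

    import "github.com/confluentinc/confluent-kafka-go/kafka"

    func main() {
    	c, err := kafka.NewConsumer(&kafka.ConfigMap{
    		"bootstrap.servers": "localhost:9092", // placeholder broker
    		"group.id":          "example-group",  // placeholder group
    		"auto.offset.reset": "earliest",       // fallback when no committed offset exists
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer c.Close()

    	topic := "example-topic" // placeholder topic

    	// OffsetStored starts from the committed offset (with auto.offset.reset
    	// as fallback); an absolute offset such as kafka.Offset(42) or a logical
    	// offset such as kafka.OffsetEnd may be used instead.
    	err = c.Assign([]kafka.TopicPartition{
    		{Topic: &topic, Partition: 0, Offset: kafka.OffsetStored},
    		{Topic: &topic, Partition: 1, Offset: kafka.Offset(42)},
    	})
    	if err != nil {
    		panic(err)
    	}
    }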

func (*Consumer) Assignment
+    AssignmentLost

+func (c *Consumer) AssignmentLost() bool
+
+AssignmentLost returns true if current partition assignment has been lost.
+This method is only applicable for use with a subscribing consumer when
+handling a rebalance event or callback.
+Partitions that have been lost may already be owned by other members in the
+group and therefore committing offsets, for example, may fail.
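A hedged sketch (not from the diff) of how AssignmentLost might be used: a rebalance callback, passed to SubscribeTopics, that commits offsets on revocation only when the assignment has not been lost. The function name commitOnRevoke is illustrative:

    package main

    import "github.com/confluentinc/confluent-kafka-go/kafka"

    // commitOnRevoke commits offsets when partitions are revoked, unless the
    // assignment has been lost to other group members.
    func commitOnRevoke(c *kafka.Consumer, event kafka.Event) error {
    	if _, ok := event.(kafka.RevokedPartitions); ok {
    		if c.AssignmentLost() {
    			// The partitions may already be owned by other group members,
    			// so committing offsets here would likely fail.
    			return nil
    		}
    		if _, err := c.Commit(); err != nil {
    			// ErrNoOffset only means there was nothing to commit.
    			if kerr, ok := err.(kafka.Error); !ok || kerr.Code() != kafka.ErrNoOffset {
    				return err
    			}
    		}
    	}
    	return nil
    }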

func (*Consumer) Close
    Commit
    CommitMessage
    CommitOffsets
    Committed
    Events
    GetConsumerGroupMetadata
    GetMetadata
+    GetRebalanceProtocol

+func (c *Consumer) GetRebalanceProtocol() string
+
+GetRebalanceProtocol returns the current consumer group rebalance protocol,
+which is either "EAGER" or "COOPERATIVE".
+If the rebalance protocol is not known in the current state an empty string
+is returned.
+Should typically only be called during rebalancing.
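Purely as an illustrative sketch (logRebalance is not part of the library), a rebalance callback could record which protocol is driving the current rebalance:

    package main

    import (
    	"log"

    	"github.com/confluentinc/confluent-kafka-go/kafka"
    )

    // logRebalance logs the rebalance event together with the active protocol.
    func logRebalance(c *kafka.Consumer, event kafka.Event) error {
    	// "EAGER" with the default assignors, "COOPERATIVE" with the
    	// cooperative-sticky assignor, "" if not currently known.
    	log.Printf("rebalance event %v handled under %q protocol", event, c.GetRebalanceProtocol())
    	return nil
    }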

func (*Consumer) GetWatermarkOffsets
+    IncrementalAssign

+func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
+
+IncrementalAssign adds the specified partitions to the current set of
+partitions to consume.
+
+The .Offset field of each TopicPartition must either be set to an absolute
+starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
+but should typically be set to `kafka.OffsetStored` to have the consumer
+use the committed offset as a start position, with a fallback to
+`auto.offset.reset` if there is no committed offset.
+
+The new partitions must not be part of the current assignment.
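A hedged sketch (not from the diff) of the assignment half of a rebalance callback for a consumer assumed to be configured with "partition.assignment.strategy": "cooperative-sticky"; the function name onAssigned is illustrative:

    package main

    import "github.com/confluentinc/confluent-kafka-go/kafka"

    // onAssigned handles the AssignedPartitions rebalance event under the
    // cooperative protocol.
    func onAssigned(c *kafka.Consumer, event kafka.Event) error {
    	if e, ok := event.(kafka.AssignedPartitions); ok {
    		// Under the cooperative protocol the event carries only the newly
    		// added partitions, so they are added to the existing assignment
    		// rather than replacing it via Assign.
    		return c.IncrementalAssign(e.Partitions)
    	}
    	return nil
    }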

+func (*Consumer) IncrementalUnassign

+func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
+
+IncrementalUnassign removes the specified partitions from the current set of
+partitions to consume.
+
+The .Offset field of the TopicPartition is ignored.
+
+The removed partitions must be part of the current assignment.
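The matching revocation half, again only as a sketch under the same cooperative-sticky assumption (onRevoked is an illustrative name):

    package main

    import "github.com/confluentinc/confluent-kafka-go/kafka"

    // onRevoked handles the RevokedPartitions rebalance event under the
    // cooperative protocol: only the partitions being taken away are removed.
    func onRevoked(c *kafka.Consumer, event kafka.Event) error {
    	if e, ok := event.(kafka.RevokedPartitions); ok {
    		// The revoked partitions must currently be assigned; their .Offset
    		// fields are ignored by IncrementalUnassign.
    		return c.IncrementalUnassign(e.Partitions)
    	}
    	return nil
    }

Under the eager protocol the equivalent handler would typically fall back to Assign/Unassign; GetRebalanceProtocol can be used to choose the branch.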

func (*Consumer) Logs
    OffsetsForTimes
    Pause
    Poll
    Position
    QueryWatermarkOffsets
    ReadMessage
    Resume
    Seek
    SetOAuthBearerToken
    SetOAuthBearerTokenFailure
    StoreOffsets
    String
    Subscribe
    SubscribeTopics
    Subscription
    Unassign
    Unsubscribe

type ConsumerGroupMetadata
    NewTestConsumerGroupMetadata

type CreatePartitionsAdminOption
type CreateTopicsAdminOption
type DeleteTopicsAdminOption
type DescribeConfigsAdminOption

type Error
    NewError
    Code
    Error
    IsFatal
    IsRetriable
    String
    TxnRequiresAbort

type ErrorCode

	ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
	// ErrApplication Local: Application generated error
	ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
+	// ErrAssignmentLost Local: Group partition assignment lost
+	ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
+	// ErrNoop Local: No operation performed
+	ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
+	// ErrAutoOffsetReset Local: No offset to automatically reset to
+	ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
	// ErrUnknown Unknown broker error
	ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
	// ErrNoError Success

@@ -3291,10 +3404,28 @@

	ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
	// ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
	ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
+	// ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
+	ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
+	// ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
+	ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
+	// ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
+	ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
+	// ErrDuplicateResource Broker: Request illegally referred to the same resource twice
+	ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
+	// ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
+	ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
+	// ErrInconsistentVoterSet Broker: Indicates that either the sender or recipient of a voter-only request is not one of the expected voters
+	ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
+	// ErrInvalidUpdateVersion Broker: Invalid update version
+	ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
+	// ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
+	ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
+	// ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
+	ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
 )
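As a hedged illustration (not from the diff), application code can match the newly added codes on a returned error via kafka.Error's Code(); the function name handleProduceErr and the retry/abort policy are only illustrative:

    package main

    import (
    	"log"

    	"github.com/confluentinc/confluent-kafka-go/kafka"
    )

    // handleProduceErr inspects an error returned by the client and reacts to
    // two of the error codes added in this release.
    func handleProduceErr(err error) {
    	kerr, ok := err.(kafka.Error)
    	if !ok {
    		return
    	}
    	switch kerr.Code() {
    	case kafka.ErrThrottlingQuotaExceeded:
    		log.Printf("throttled by broker quota, backing off: %v", kerr)
    	case kafka.ErrProducerFenced:
    		log.Printf("fenced by a newer producer instance, aborting: %v", kerr)
    	default:
    		log.Printf("kafka error %d: %v", kerr.Code(), kerr)
    	}
    }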

func (ErrorCode) String

type Event
type Handle