Mirror of https://github.com/1f349/dendrite.git (synced 2024-11-08 18:16:59 +00:00)
Add kafka and postgres clients to vendor directory

parent 2f965c6b33, commit a78e0cba8e
vendor/manifest (vendored, new file, 80 lines added)
@@ -0,0 +1,80 @@
{
	"version": 0,
	"dependencies": [
		{
			"importpath": "github.com/Shopify/sarama",
			"repository": "https://github.com/Shopify/sarama",
			"revision": "574d3147eee384229bf96a5d12c207fe7b5234f3",
			"branch": "master"
		},
		{
			"importpath": "github.com/davecgh/go-spew/spew",
			"repository": "https://github.com/davecgh/go-spew",
			"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
			"branch": "master",
			"path": "/spew"
		},
		{
			"importpath": "github.com/eapache/go-resiliency/breaker",
			"repository": "https://github.com/eapache/go-resiliency",
			"revision": "b86b1ec0dd4209a588dc1285cdd471e73525c0b3",
			"branch": "master",
			"path": "/breaker"
		},
		{
			"importpath": "github.com/eapache/go-xerial-snappy",
			"repository": "https://github.com/eapache/go-xerial-snappy",
			"revision": "bb955e01b9346ac19dc29eb16586c90ded99a98c",
			"branch": "master"
		},
		{
			"importpath": "github.com/eapache/queue",
			"repository": "https://github.com/eapache/queue",
			"revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98",
			"branch": "master"
		},
		{
			"importpath": "github.com/golang/snappy",
			"repository": "https://github.com/golang/snappy",
			"revision": "7db9049039a047d955fe8c19b83c8ff5abd765c7",
			"branch": "master"
		},
		{
			"importpath": "github.com/klauspost/crc32",
			"repository": "https://github.com/klauspost/crc32",
			"revision": "cb6bfca970f6908083f26f39a79009d608efd5cd",
			"branch": "master"
		},
		{
			"importpath": "github.com/lib/pq",
			"repository": "https://github.com/lib/pq",
			"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
			"branch": "master"
		},
		{
			"importpath": "github.com/pierrec/lz4",
			"repository": "https://github.com/pierrec/lz4",
			"revision": "5c9560bfa9ace2bf86080bf40d46b34ae44604df",
			"branch": "master"
		},
		{
			"importpath": "github.com/pierrec/xxHash/xxHash32",
			"repository": "https://github.com/pierrec/xxHash",
			"revision": "5a004441f897722c627870a981d02b29924215fa",
			"branch": "master",
			"path": "/xxHash32"
		},
		{
			"importpath": "github.com/rcrowley/go-metrics",
			"repository": "https://github.com/rcrowley/go-metrics",
			"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
			"branch": "master"
		},
		{
			"importpath": "gopkg.in/Shopify/sarama.v1",
			"repository": "https://gopkg.in/Shopify/sarama.v1",
			"revision": "0fb560e5f7fbcaee2f75e3c34174320709f69944",
			"branch": "master"
		}
	]
}
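The commit message says this change vendors Kafka and Postgres clients; the manifest above pins `github.com/Shopify/sarama` and `github.com/lib/pq` accordingly. As a rough, hedged sketch of how a caller might open both vendored clients (the broker address and connection string are placeholders, not values from this commit):

```go
package main

import (
	"database/sql"
	"log"

	"github.com/Shopify/sarama"
	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql
)

func main() {
	// Kafka: open a client against the vendored sarama library.
	kafka, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer kafka.Close()

	// Postgres: open a connection pool through the vendored lib/pq driver.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/dendrite?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```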
vendor/src/github.com/Shopify/sarama/CHANGELOG.md (vendored, new file, 355 lines added)
@@ -0,0 +1,355 @@
# Changelog

#### Version 1.11.0 (2016-12-20)

_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer
which led to unexpected values and data races.
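To make the 1.11.0 note concrete, here is a minimal, hedged sketch (broker address and topic are placeholders; assumes the `log` and `sarama` imports):

```go
func syncProduceExample() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by the SyncProducer as of 1.11
	config.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "events",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```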

New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature
  ([#701](https://github.com/Shopify/sarama/pull/701),
  [#746](https://github.com/Shopify/sarama/pull/746),
  [#766](https://github.com/Shopify/sarama/pull/766)).
- Add support for LZ4 compression
  ([#786](https://github.com/Shopify/sarama/pull/786)).
- Add support for ListOffsetRequest v1 and Kafka 0.10.1
  ([#775](https://github.com/Shopify/sarama/pull/775)).
- Added a `HighWaterMarks` method to the Consumer which aggregates the
  `HighWaterMarkOffset` values of its child topic/partitions
  ([#769](https://github.com/Shopify/sarama/pull/769)).

Bug Fixes:
- Fixed producing when using timestamps, compression and Kafka 0.10
  ([#759](https://github.com/Shopify/sarama/pull/759)).
- Added missing decoder methods to DescribeGroups response
  ([#756](https://github.com/Shopify/sarama/pull/756)).
- Fix producer shutdown when `Return.Errors` is disabled
  ([#787](https://github.com/Shopify/sarama/pull/787)).
- Don't mutate configuration in SyncProducer
  ([#790](https://github.com/Shopify/sarama/pull/790)).
- Fix crash on SASL initialization failure
  ([#795](https://github.com/Shopify/sarama/pull/795)).

#### Version 1.10.1 (2016-08-30)

Bug Fixes:
- Fix the documentation for `HashPartitioner` which was incorrect
  ([#717](https://github.com/Shopify/sarama/pull/717)).
- Permit client creation even when it is limited by ACLs
  ([#722](https://github.com/Shopify/sarama/pull/722)).
- Several fixes to the consumer timer optimization code, regressions introduced
  in v1.10.0. Go's timers are finicky
  ([#730](https://github.com/Shopify/sarama/pull/730),
  [#733](https://github.com/Shopify/sarama/pull/733),
  [#734](https://github.com/Shopify/sarama/pull/734)).
- Handle consuming compressed relative offsets with Kafka 0.10
  ([#735](https://github.com/Shopify/sarama/pull/735)).

#### Version 1.10.0 (2016-08-02)

_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
Kafka you are running against (via the `config.Version` value) in order to use
features that may not be compatible with old Kafka versions. If you don't
specify this value it will default to 0.8.2 (the minimum supported), and trying
to use more recent features (like the offset manager) will fail with an error.

_Also:_ The offset-manager's behaviour has been changed to match the upstream
java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
offset-manager, please ensure that you are committing one *greater* than the
last consumed message offset or else you may end up consuming duplicate
messages.
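A hedged sketch combining both 1.10.0 notes above (group, topic, broker address, and offset are placeholders; assumes the `log` and `sarama` imports):

```go
func commitOffsetsExample() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_10_0_0 // tell Sarama which Kafka version it is talking to

	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	pom, err := om.ManagePartition("events", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	var lastConsumedOffset int64 = 41
	// Per the note above, commit one *greater* than the last consumed offset.
	pom.MarkOffset(lastConsumedOffset+1, "")
}
```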
|
||||
New Features:
|
||||
- Support for Kafka 0.10
|
||||
([#672](https://github.com/Shopify/sarama/pull/672),
|
||||
[#678](https://github.com/Shopify/sarama/pull/678),
|
||||
[#681](https://github.com/Shopify/sarama/pull/681), and others).
|
||||
- Support for configuring the target Kafka version
|
||||
([#676](https://github.com/Shopify/sarama/pull/676)).
|
||||
- Batch producing support in the SyncProducer
|
||||
([#677](https://github.com/Shopify/sarama/pull/677)).
|
||||
- Extend producer mock to allow setting expectations on message contents
|
||||
([#667](https://github.com/Shopify/sarama/pull/667)).
|
||||
|
||||
Improvements:
|
||||
- Support `nil` compressed messages for deleting in compacted topics
|
||||
([#634](https://github.com/Shopify/sarama/pull/634)).
|
||||
- Pre-allocate decoding errors, greatly reducing heap usage and GC time against
|
||||
misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
|
||||
- Re-use consumer expiry timers, removing one allocation per consumed message
|
||||
([#707](https://github.com/Shopify/sarama/pull/707)).
|
||||
|
||||
Bug Fixes:
|
||||
- Actually default the client ID to "sarama" like we say we do
|
||||
([#664](https://github.com/Shopify/sarama/pull/664)).
|
||||
- Fix a rare issue where `Client.Leader` could return the wrong error
|
||||
([#685](https://github.com/Shopify/sarama/pull/685)).
|
||||
- Fix a possible tight loop in the consumer
|
||||
([#693](https://github.com/Shopify/sarama/pull/693)).
|
||||
- Match upstream's offset-tracking behaviour
|
||||
([#705](https://github.com/Shopify/sarama/pull/705)).
|
||||
- Report UnknownTopicOrPartition errors from the offset manager
|
||||
([#706](https://github.com/Shopify/sarama/pull/706)).
|
||||
- Fix possible negative partition value from the HashPartitioner
|
||||
([#709](https://github.com/Shopify/sarama/pull/709)).
|
||||
|
||||
#### Version 1.9.0 (2016-05-16)
|
||||
|
||||
New Features:
|
||||
- Add support for custom offset manager retention durations
|
||||
([#602](https://github.com/Shopify/sarama/pull/602)).
|
||||
- Publish low-level mocks to enable testing of third-party producer/consumer
|
||||
implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
|
||||
- Declare support for Golang 1.6
|
||||
([#611](https://github.com/Shopify/sarama/pull/611)).
|
||||
- Support for SASL plain-text auth
|
||||
([#648](https://github.com/Shopify/sarama/pull/648)).
|
||||
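As a brief illustration of the SASL plain-text entry above (together with the TLS support listed under 1.5.0 further down), a hedged config fragment with placeholder credentials:

```go
func saslConfigExample() *sarama.Config {
	config := sarama.NewConfig()
	config.Net.TLS.Enable = true // run the SASL exchange over an encrypted connection
	config.Net.SASL.Enable = true
	config.Net.SASL.User = "alice" // placeholder credentials, not values from this commit
	config.Net.SASL.Password = "secret"
	return config
}
```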
|
||||
Improvements:
|
||||
- Simplified broker locking scheme slightly
|
||||
([#604](https://github.com/Shopify/sarama/pull/604)).
|
||||
- Documentation cleanup
|
||||
([#605](https://github.com/Shopify/sarama/pull/605),
|
||||
[#621](https://github.com/Shopify/sarama/pull/621),
|
||||
[#654](https://github.com/Shopify/sarama/pull/654)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix race condition shutting down the OffsetManager
|
||||
([#658](https://github.com/Shopify/sarama/pull/658)).
|
||||
|
||||
#### Version 1.8.0 (2016-02-01)
|
||||
|
||||
New Features:
|
||||
- Full support for Kafka 0.9:
|
||||
- All protocol messages and fields
|
||||
([#586](https://github.com/Shopify/sarama/pull/586),
|
||||
[#588](https://github.com/Shopify/sarama/pull/588),
|
||||
[#590](https://github.com/Shopify/sarama/pull/590)).
|
||||
- Verified that TLS support works
|
||||
([#581](https://github.com/Shopify/sarama/pull/581)).
|
||||
- Fixed the OffsetManager compatibility
|
||||
([#585](https://github.com/Shopify/sarama/pull/585)).
|
||||
|
||||
Improvements:
|
||||
- Optimize for fewer system calls when reading from the network
|
||||
([#584](https://github.com/Shopify/sarama/pull/584)).
|
||||
- Automatically retry `InvalidMessage` errors to match upstream behaviour
|
||||
([#589](https://github.com/Shopify/sarama/pull/589)).
|
||||
|
||||
#### Version 1.7.0 (2015-12-11)
|
||||
|
||||
New Features:
|
||||
- Preliminary support for Kafka 0.9
|
||||
([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
|
||||
caveats:
|
||||
- Protocol-layer support is mostly in place
|
||||
([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
|
||||
renamed some messages and fields, which we did not rename in order to preserve API
compatibility.
|
||||
- The producer and consumer work against 0.9, but the offset manager does
|
||||
not ([#573](https://github.com/Shopify/sarama/pull/573)).
|
||||
- TLS support may or may not work
|
||||
([#581](https://github.com/Shopify/sarama/pull/581)).
|
||||
|
||||
Improvements:
|
||||
- Don't wait for request timeouts on dead brokers, greatly speeding recovery
|
||||
when the TCP connection is left hanging
|
||||
([#548](https://github.com/Shopify/sarama/pull/548)).
|
||||
- Refactored part of the producer. The new version provides a much more elegant
|
||||
solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
|
||||
slightly more efficient, and much more precise in calculating batch sizes
|
||||
when compression is used
|
||||
([#549](https://github.com/Shopify/sarama/pull/549),
|
||||
[#550](https://github.com/Shopify/sarama/pull/550),
|
||||
[#551](https://github.com/Shopify/sarama/pull/551)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix race condition in consumer test mock
|
||||
([#553](https://github.com/Shopify/sarama/pull/553)).
|
||||
|
||||
#### Version 1.6.1 (2015-09-25)
|
||||
|
||||
Bug Fixes:
|
||||
- Fix panic that could occur if a user-supplied message value failed to encode
|
||||
([#449](https://github.com/Shopify/sarama/pull/449)).
|
||||
|
||||
#### Version 1.6.0 (2015-09-04)
|
||||
|
||||
New Features:
|
||||
- Implementation of a consumer offset manager using the APIs introduced in
|
||||
Kafka 0.8.2. The API is designed mainly for integration into a future
|
||||
high-level consumer, not for direct use, although it is *possible* to use it
|
||||
directly.
|
||||
([#461](https://github.com/Shopify/sarama/pull/461)).
|
||||
|
||||
Improvements:
|
||||
- CRC32 calculation is much faster on machines with SSE4.2 instructions,
|
||||
removing a major hotspot from most profiles
|
||||
([#255](https://github.com/Shopify/sarama/pull/255)).
|
||||
|
||||
Bug Fixes:
|
||||
- Make protocol decoding more robust against some malformed packets generated
|
||||
by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
|
||||
[#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
|
||||
([#528](https://github.com/Shopify/sarama/pull/528)).
|
||||
- Fix a potential race condition panic in the consumer on shutdown
|
||||
([#529](https://github.com/Shopify/sarama/pull/529)).
|
||||
|
||||
#### Version 1.5.0 (2015-08-17)
|
||||
|
||||
New Features:
|
||||
- TLS-encrypted network connections are now supported. This feature is subject
|
||||
to change when Kafka releases built-in TLS support, but for now this is
|
||||
enough to work with TLS-terminating proxies
|
||||
([#154](https://github.com/Shopify/sarama/pull/154)).
|
||||
|
||||
Improvements:
|
||||
- The consumer will not block if a single partition is not drained by the user;
|
||||
all other partitions will continue to consume normally
|
||||
([#485](https://github.com/Shopify/sarama/pull/485)).
|
||||
- Formatting of error strings has been much improved
|
||||
([#495](https://github.com/Shopify/sarama/pull/495)).
|
||||
- Internal refactoring of the producer for code cleanliness and to enable
|
||||
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix a potential deadlock in the consumer on shutdown
|
||||
([#475](https://github.com/Shopify/sarama/pull/475)).
|
||||
|
||||
#### Version 1.4.3 (2015-07-21)
|
||||
|
||||
Bug Fixes:
|
||||
- Don't include the partitioner in the producer's "fetch partitions"
|
||||
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
|
||||
- Don't retry messages until the broker is closed when abandoning a broker in
|
||||
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
|
||||
- Update the import path for snappy-go, it has moved again and the API has
|
||||
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
|
||||
|
||||
#### Version 1.4.2 (2015-05-27)
|
||||
|
||||
Bug Fixes:
|
||||
- Update the import path for snappy-go, it has moved from google code to github
|
||||
([#456](https://github.com/Shopify/sarama/pull/456)).
|
||||
|
||||
#### Version 1.4.1 (2015-05-25)
|
||||
|
||||
Improvements:
|
||||
- Optimizations when decoding snappy messages, thanks to John Potocny
|
||||
([#446](https://github.com/Shopify/sarama/pull/446)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix hypothetical race conditions on producer shutdown
|
||||
([#450](https://github.com/Shopify/sarama/pull/450),
|
||||
[#451](https://github.com/Shopify/sarama/pull/451)).
|
||||
|
||||
#### Version 1.4.0 (2015-05-01)
|
||||
|
||||
New Features:
|
||||
- The consumer now implements `Topics()` and `Partitions()` methods to enable
|
||||
users to dynamically choose what topics/partitions to consume without
|
||||
instantiating a full client
|
||||
([#431](https://github.com/Shopify/sarama/pull/431)).
|
||||
- The partition-consumer now exposes the high water mark offset value returned
|
||||
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
|
||||
- Added a `kafka-console-consumer` tool capable of handling multiple
|
||||
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
|
||||
([#439](https://github.com/Shopify/sarama/pull/439),
|
||||
[#442](https://github.com/Shopify/sarama/pull/442)).
|
||||
|
||||
Improvements:
|
||||
- The producer's logging during retry scenarios is more consistent, more
|
||||
useful, and slightly less verbose
|
||||
([#429](https://github.com/Shopify/sarama/pull/429)).
|
||||
- The client now shuffles its initial list of seed brokers in order to prevent
|
||||
thundering herd on the first broker in the list
|
||||
([#441](https://github.com/Shopify/sarama/pull/441)).
|
||||
|
||||
Bug Fixes:
|
||||
- The producer now correctly manages its state if retries occur when it is
|
||||
shutting down, fixing several instances of confusing behaviour and at least
|
||||
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
|
||||
- The consumer now handles messages for different partitions asynchronously,
|
||||
making it much more resilient to specific user code ordering
|
||||
([#325](https://github.com/Shopify/sarama/pull/325)).
|
||||
|
||||
#### Version 1.3.0 (2015-04-16)
|
||||
|
||||
New Features:
|
||||
- The client now tracks consumer group coordinators using
|
||||
ConsumerMetadataRequests similar to how it tracks partition leadership using
|
||||
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
|
||||
This adds two methods to the client API:
|
||||
- `Coordinator(consumerGroup string) (*Broker, error)`
|
||||
- `RefreshCoordinator(consumerGroup string) error`
|
||||
|
||||
Improvements:
|
||||
- ConsumerMetadataResponses now automatically create a Broker object out of the
|
||||
ID/address/port combination for the Coordinator; accessing the fields
|
||||
individually has been deprecated
|
||||
([#413](https://github.com/Shopify/sarama/pull/413)).
|
||||
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
|
||||
Consumers will fail to start if the provided offset is out of range
|
||||
([#418](https://github.com/Shopify/sarama/pull/418))
|
||||
and they will automatically shut down if the offset falls out of range
|
||||
([#424](https://github.com/Shopify/sarama/pull/424)).
|
||||
- Small performance improvement in encoding and decoding protocol messages
|
||||
([#427](https://github.com/Shopify/sarama/pull/427)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix a rare race condition in the client's background metadata refresher if
|
||||
it happens to be activated while the client is being closed
|
||||
([#422](https://github.com/Shopify/sarama/pull/422)).
|
||||
|
||||
#### Version 1.2.0 (2015-04-07)
|
||||
|
||||
Improvements:
|
||||
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
|
||||
([#389](https://github.com/Shopify/sarama/pull/389)).
|
||||
- The producer is now somewhat more memory-efficient during and after retrying
|
||||
messages due to an improved queue implementation
|
||||
([#396](https://github.com/Shopify/sarama/pull/396)).
|
||||
- The consumer produces much more useful logging output when leadership
|
||||
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
|
||||
- The client's `GetOffset` method will now automatically refresh metadata and
|
||||
retry once in the event of stale information or similar
|
||||
([#394](https://github.com/Shopify/sarama/pull/394)).
|
||||
- Broker connections now have support for using TCP keepalives
|
||||
([#407](https://github.com/Shopify/sarama/issues/407)).
|
||||
|
||||
Bug Fixes:
|
||||
- The OffsetCommitRequest message now correctly implements all three possible
|
||||
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
|
||||
[#400](https://github.com/Shopify/sarama/pull/400)).
|
||||
|
||||
#### Version 1.1.0 (2015-03-20)
|
||||
|
||||
Improvements:
|
||||
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
|
||||
broken topics don't choke throughput
|
||||
([#373](https://github.com/Shopify/sarama/pull/373)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix the producer's internal reference counting in certain unusual scenarios
|
||||
([#367](https://github.com/Shopify/sarama/pull/367)).
|
||||
- Fix the consumer's internal reference counting in certain unusual scenarios
|
||||
([#369](https://github.com/Shopify/sarama/pull/369)).
|
||||
- Fix a condition where the producer's internal control messages could have
|
||||
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
|
||||
- Fix an issue where invalid partition lists would be cached when asking for
|
||||
metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
|
||||
|
||||
|
||||
#### Version 1.0.0 (2015-03-17)
|
||||
|
||||
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
|
||||
|
||||
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
|
||||
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
|
||||
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package (a usage sketch follows this list).
|
||||
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
|
||||
- All the configuration values have been unified in the `Config` struct.
|
||||
- Much improved test suite.
|
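The `mocks` package mentioned in the 1.0.0 notes above enables dependency injection in tests. A hedged sketch (the package and test names are illustrative, not taken from this commit):

```go
package mypackage

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// TestPublish shows dependency injection with the mock SyncProducer.
func TestPublish(t *testing.T) {
	producer := mocks.NewSyncProducer(t, nil)
	producer.ExpectSendMessageAndSucceed()

	// Code under test only sees the sarama.SyncProducer interface.
	var p sarama.SyncProducer = producer
	if _, _, err := p.SendMessage(&sarama.ProducerMessage{
		Topic: "events",
		Value: sarama.StringEncoder("hello"),
	}); err != nil {
		t.Fatal(err)
	}

	if err := producer.Close(); err != nil {
		t.Fatal(err)
	}
}
```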
vendor/src/github.com/Shopify/sarama/MIT-LICENSE (vendored, new file, 20 lines added)
@@ -0,0 +1,20 @@
Copyright (c) 2013 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/src/github.com/Shopify/sarama/Makefile (vendored, new file, 21 lines added)
@@ -0,0 +1,21 @@
default: fmt vet errcheck test

test:
	go test -v -timeout 60s -race ./...

vet:
	go vet ./...

errcheck:
	errcheck github.com/Shopify/sarama/...

fmt:
	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi

install_dependencies: install_errcheck get

install_errcheck:
	go get github.com/kisielk/errcheck

get:
	go get -t
vendor/src/github.com/Shopify/sarama/README.md (vendored, new file, 36 lines added)
@@ -0,0 +1,36 @@
sarama
======

[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)

Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).

### Getting started

- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.

### Compatibility and API stability

Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are
still likely to work.

Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
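To illustrate the versioned import path described above, a minimal, hedged consumer sketch (broker address and topic are placeholders):

```go
package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1" // the versioned import path described above
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("events", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, string(msg.Value))
	}
}
```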

### Contributing

* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
  technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
  contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!
vendor/src/github.com/Shopify/sarama/Vagrantfile (vendored, new file, 20 lines added)
@@ -0,0 +1,20 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "ubuntu/trusty64"

  config.vm.provision :shell, path: "vagrant/provision.sh"

  config.vm.network "private_network", ip: "192.168.100.67"

  config.vm.provider "virtualbox" do |v|
    v.memory = MEMORY
  end
end
vendor/src/github.com/Shopify/sarama/api_versions_request.go (vendored, new file, 24 lines added)
@@ -0,0 +1,24 @@
package sarama

type ApiVersionsRequest struct {
}

func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
	return nil
}

func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

func (r *ApiVersionsRequest) key() int16 {
	return 18
}

func (r *ApiVersionsRequest) version() int16 {
	return 0
}

func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
vendor/src/github.com/Shopify/sarama/api_versions_request_test.go (vendored, new file, 14 lines added)
@@ -0,0 +1,14 @@
package sarama

import "testing"

var (
	apiVersionRequest = []byte{}
)

func TestApiVersionsRequest(t *testing.T) {
	var request *ApiVersionsRequest

	request = new(ApiVersionsRequest)
	testRequest(t, "basic", request, apiVersionRequest)
}
vendor/src/github.com/Shopify/sarama/api_versions_response.go (vendored, new file, 86 lines added)
@@ -0,0 +1,86 @@
package sarama

type ApiVersionsResponseBlock struct {
	ApiKey     int16
	MinVersion int16
	MaxVersion int16
}

func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
	pe.putInt16(b.ApiKey)
	pe.putInt16(b.MinVersion)
	pe.putInt16(b.MaxVersion)
	return nil
}

func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
	var err error

	if b.ApiKey, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MinVersion, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MaxVersion, err = pd.getInt16(); err != nil {
		return err
	}

	return nil
}

type ApiVersionsResponse struct {
	Err         KError
	ApiVersions []*ApiVersionsResponseBlock
}

func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
		return err
	}
	for _, apiVersion := range r.ApiVersions {
		if err := apiVersion.encode(pe); err != nil {
			return err
		}
	}
	return nil
}

func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
	if kerr, err := pd.getInt16(); err != nil {
		return err
	} else {
		r.Err = KError(kerr)
	}

	numBlocks, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
	for i := 0; i < numBlocks; i++ {
		block := new(ApiVersionsResponseBlock)
		if err := block.decode(pd); err != nil {
			return err
		}
		r.ApiVersions[i] = block
	}

	return nil
}

func (r *ApiVersionsResponse) key() int16 {
	return 18
}

func (r *ApiVersionsResponse) version() int16 {
	return 0
}

func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
vendor/src/github.com/Shopify/sarama/api_versions_response_test.go (vendored, new file, 32 lines added)
@@ -0,0 +1,32 @@
package sarama

import "testing"

var (
	apiVersionResponse = []byte{
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x01,
		0x00, 0x03,
		0x00, 0x02,
		0x00, 0x01,
	}
)

func TestApiVersionsResponse(t *testing.T) {
	var response *ApiVersionsResponse

	response = new(ApiVersionsResponse)
	testVersionDecodable(t, "no error", response, apiVersionResponse, 0)
	if response.Err != ErrNoError {
		t.Error("Decoding error failed: no error expected but found", response.Err)
	}
	if response.ApiVersions[0].ApiKey != 0x03 {
		t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey)
	}
	if response.ApiVersions[0].MinVersion != 0x02 {
		t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion)
	}
	if response.ApiVersions[0].MaxVersion != 0x01 {
		t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion)
	}
}
vendor/src/github.com/Shopify/sarama/async_producer.go (vendored, new file, 905 lines added)
@@ -0,0 +1,905 @@
package sarama

import (
	"fmt"
	"sync"
	"time"

	"github.com/eapache/go-resiliency/breaker"
	"github.com/eapache/queue"
)

// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {

	// AsyncClose triggers a shutdown of the producer, flushing any messages it may
	// have buffered. The shutdown has completed when both the Errors and Successes
	// channels have been closed. When calling AsyncClose, you *must* continue to
	// read from those channels in order to drain the results of any messages in
	// flight.
	AsyncClose()

	// Close shuts down the producer and flushes any messages it may have buffered.
	// You must call this function before a producer object passes out of scope, as
	// it may otherwise leak memory. You must call this before calling Close on the
	// underlying client.
	Close() error

	// Input is the input channel for the user to write messages to that they
	// wish to send.
	Input() chan<- *ProducerMessage

	// Successes is the success output channel back to the user when AckSuccesses is
	// enabled. If Return.Successes is true, you MUST read from this channel or the
	// Producer will deadlock. It is suggested that you send and read messages
	// together in a single select statement.
	Successes() <-chan *ProducerMessage

	// Errors is the error output channel back to the user. You MUST read from this
	// channel or the Producer will deadlock when the channel is full. Alternatively,
	// you can set Producer.Return.Errors in your config to false, which prevents
	// errors from being returned.
	Errors() <-chan *ProducerError
}
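A hedged usage sketch for the interface above (broker address and topic are placeholders; assumes the `log` and `sarama` imports). Sending and reading results in one select keeps both output channels drained, as the doc comments require:

```go
func asyncProduceExample() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}

	var done int
	for done < 10 {
		select {
		case producer.Input() <- &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("ping")}:
			// enqueued; the result arrives later on Successes or Errors
		case msg := <-producer.Successes():
			done++
			log.Printf("delivered to partition %d at offset %d", msg.Partition, msg.Offset)
		case perr := <-producer.Errors():
			done++
			log.Println("failed to deliver:", perr)
		}
	}

	// Close flushes and drains any messages still in flight.
	if err := producer.Close(); err != nil {
		log.Println("close:", err)
	}
}
```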
|
||||
type asyncProducer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
errors chan *ProducerError
|
||||
input, successes, retries chan *ProducerMessage
|
||||
inFlight sync.WaitGroup
|
||||
|
||||
brokers map[*Broker]chan<- *ProducerMessage
|
||||
brokerRefs map[chan<- *ProducerMessage]int
|
||||
brokerLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
|
||||
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
|
||||
client, err := NewClient(addrs, conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.(*asyncProducer).ownClient = true
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this producer.
|
||||
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
p := &asyncProducer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
errors: make(chan *ProducerError),
|
||||
input: make(chan *ProducerMessage),
|
||||
successes: make(chan *ProducerMessage),
|
||||
retries: make(chan *ProducerMessage),
|
||||
brokers: make(map[*Broker]chan<- *ProducerMessage),
|
||||
brokerRefs: make(map[chan<- *ProducerMessage]int),
|
||||
}
|
||||
|
||||
// launch our singleton dispatchers
|
||||
go withRecover(p.dispatcher)
|
||||
go withRecover(p.retryHandler)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
type flagSet int8
|
||||
|
||||
const (
|
||||
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
|
||||
fin // final message from partitionProducer to brokerProducer and back
|
||||
shutdown // start the shutdown process
|
||||
)
|
||||
|
||||
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
|
||||
type ProducerMessage struct {
|
||||
Topic string // The Kafka topic for this message.
|
||||
// The partitioning key for this message. Pre-existing Encoders include
|
||||
// StringEncoder and ByteEncoder.
|
||||
Key Encoder
|
||||
// The actual message to store in Kafka. Pre-existing Encoders include
|
||||
// StringEncoder and ByteEncoder.
|
||||
Value Encoder
|
||||
|
||||
// This field is used to hold arbitrary data you wish to include so it
|
||||
// will be available when receiving on the Successes and Errors channels.
|
||||
// Sarama completely ignores this field; it is only to be used for
// pass-through data.
|
||||
Metadata interface{}
|
||||
|
||||
// Below this point are filled in by the producer as the message is processed
|
||||
|
||||
// Offset is the offset of the message stored on the broker. This is only
|
||||
// guaranteed to be defined if the message was successfully delivered and
|
||||
// RequiredAcks is not NoResponse.
|
||||
Offset int64
|
||||
// Partition is the partition that the message was sent to. This is only
|
||||
// guaranteed to be defined if the message was successfully delivered.
|
||||
Partition int32
|
||||
// Timestamp is the timestamp assigned to the message by the broker. This
|
||||
// is only guaranteed to be defined if the message was successfully
|
||||
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
|
||||
// least version 0.10.0.
|
||||
Timestamp time.Time
|
||||
|
||||
retries int
|
||||
flags flagSet
|
||||
}
|
||||
|
||||
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
|
||||
|
||||
func (m *ProducerMessage) byteSize() int {
|
||||
size := producerMessageOverhead
|
||||
if m.Key != nil {
|
||||
size += m.Key.Length()
|
||||
}
|
||||
if m.Value != nil {
|
||||
size += m.Value.Length()
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (m *ProducerMessage) clear() {
|
||||
m.flags = 0
|
||||
m.retries = 0
|
||||
}
|
||||
|
||||
// ProducerError is the type of error generated when the producer fails to deliver a message.
|
||||
// It contains the original ProducerMessage as well as the actual error value.
|
||||
type ProducerError struct {
|
||||
Msg *ProducerMessage
|
||||
Err error
|
||||
}
|
||||
|
||||
func (pe ProducerError) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
|
||||
}
|
||||
|
||||
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
|
||||
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
|
||||
// when closing a producer.
|
||||
type ProducerErrors []*ProducerError
|
||||
|
||||
func (pe ProducerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Errors() <-chan *ProducerError {
|
||||
return p.errors
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
|
||||
return p.successes
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Input() chan<- *ProducerMessage {
|
||||
return p.input
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Close() error {
|
||||
p.AsyncClose()
|
||||
|
||||
if p.conf.Producer.Return.Successes {
|
||||
go withRecover(func() {
|
||||
for _ = range p.successes {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var errors ProducerErrors
|
||||
if p.conf.Producer.Return.Errors {
|
||||
for event := range p.errors {
|
||||
errors = append(errors, event)
|
||||
}
|
||||
} else {
|
||||
<-p.errors
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *asyncProducer) AsyncClose() {
|
||||
go withRecover(p.shutdown)
|
||||
}
|
||||
|
||||
// singleton
|
||||
// dispatches messages by topic
|
||||
func (p *asyncProducer) dispatcher() {
|
||||
handlers := make(map[string]chan<- *ProducerMessage)
|
||||
shuttingDown := false
|
||||
|
||||
for msg := range p.input {
|
||||
if msg == nil {
|
||||
Logger.Println("Something tried to send a nil message, it was ignored.")
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.flags&shutdown != 0 {
|
||||
shuttingDown = true
|
||||
p.inFlight.Done()
|
||||
continue
|
||||
} else if msg.retries == 0 {
|
||||
if shuttingDown {
|
||||
// we can't just call returnError here because that decrements the wait group,
|
||||
// which hasn't been incremented yet for this message, and shouldn't be
|
||||
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.inFlight.Add(1)
|
||||
}
|
||||
|
||||
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
|
||||
p.returnError(msg, ErrMessageSizeTooLarge)
|
||||
continue
|
||||
}
|
||||
|
||||
handler := handlers[msg.Topic]
|
||||
if handler == nil {
|
||||
handler = p.newTopicProducer(msg.Topic)
|
||||
handlers[msg.Topic] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// one per topic
|
||||
// partitions messages, then dispatches them by partition
|
||||
type topicProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
breaker *breaker.Breaker
|
||||
handlers map[int32]chan<- *ProducerMessage
|
||||
partitioner Partitioner
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
tp := &topicProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
input: input,
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
handlers: make(map[int32]chan<- *ProducerMessage),
|
||||
partitioner: p.conf.Producer.Partitioner(topic),
|
||||
}
|
||||
go withRecover(tp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (tp *topicProducer) dispatch() {
|
||||
for msg := range tp.input {
|
||||
if msg.retries == 0 {
|
||||
if err := tp.partitionMessage(msg); err != nil {
|
||||
tp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
handler := tp.handlers[msg.Partition]
|
||||
if handler == nil {
|
||||
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
|
||||
tp.handlers[msg.Partition] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range tp.handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
|
||||
var partitions []int32
|
||||
|
||||
err := tp.breaker.Run(func() (err error) {
|
||||
if tp.partitioner.RequiresConsistency() {
|
||||
partitions, err = tp.parent.client.Partitions(msg.Topic)
|
||||
} else {
|
||||
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numPartitions := int32(len(partitions))
|
||||
|
||||
if numPartitions == 0 {
|
||||
return ErrLeaderNotAvailable
|
||||
}
|
||||
|
||||
choice, err := tp.partitioner.Partition(msg, numPartitions)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if choice < 0 || choice >= numPartitions {
|
||||
return ErrInvalidPartition
|
||||
}
|
||||
|
||||
msg.Partition = partitions[choice]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// one per partition per topic
|
||||
// dispatches messages to the appropriate broker
|
||||
// also responsible for maintaining message order during retries
|
||||
type partitionProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
partition int32
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
leader *Broker
|
||||
breaker *breaker.Breaker
|
||||
output chan<- *ProducerMessage
|
||||
|
||||
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
|
||||
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
|
||||
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
|
||||
// therefore whether our buffer is complete and safe to flush)
|
||||
highWatermark int
|
||||
retryState []partitionRetryState
|
||||
}
|
||||
|
||||
type partitionRetryState struct {
|
||||
buf []*ProducerMessage
|
||||
expectChaser bool
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
pp := &partitionProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
input: input,
|
||||
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
|
||||
}
|
||||
go withRecover(pp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) dispatch() {
|
||||
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
|
||||
// on the first message
|
||||
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
|
||||
if pp.leader != nil {
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
}
|
||||
|
||||
for msg := range pp.input {
|
||||
if msg.retries > pp.highWatermark {
|
||||
// a new, higher, retry level; handle it and then back off
|
||||
pp.newHighWatermark(msg.retries)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
} else if pp.highWatermark > 0 {
|
||||
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
|
||||
if msg.retries < pp.highWatermark {
|
||||
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
|
||||
if msg.flags&fin == fin {
|
||||
pp.retryState[msg.retries].expectChaser = false
|
||||
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
|
||||
} else {
|
||||
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
|
||||
}
|
||||
continue
|
||||
} else if msg.flags&fin == fin {
|
||||
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
|
||||
// meaning this retry level is done and we can go down (at least) one level and flush that
|
||||
pp.retryState[pp.highWatermark].expectChaser = false
|
||||
pp.flushRetryBuffers()
|
||||
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
|
||||
// without breaking any of our ordering guarantees
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnError(msg, err)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
continue
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
if pp.output != nil {
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) newHighWatermark(hwm int) {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
|
||||
pp.highWatermark = hwm
|
||||
|
||||
// send off a fin so that we know when everything "in between" has made it
|
||||
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
|
||||
pp.retryState[pp.highWatermark].expectChaser = true
|
||||
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
|
||||
|
||||
// a new HWM means that our current broker selection is out of date
|
||||
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
pp.output = nil
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) flushRetryBuffers() {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
for {
|
||||
pp.highWatermark--
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
|
||||
goto flushDone
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
for _, msg := range pp.retryState[pp.highWatermark].buf {
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
flushDone:
|
||||
pp.retryState[pp.highWatermark].buf = nil
|
||||
if pp.retryState[pp.highWatermark].expectChaser {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
break
|
||||
} else if pp.highWatermark == 0 {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) updateLeader() error {
|
||||
return pp.breaker.Run(func() (err error) {
|
||||
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// one per broker; also constructs an associated flusher
|
||||
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
var (
|
||||
input = make(chan *ProducerMessage)
|
||||
bridge = make(chan *produceSet)
|
||||
responses = make(chan *brokerProducerResponse)
|
||||
)
|
||||
|
||||
bp := &brokerProducer{
|
||||
parent: p,
|
||||
broker: broker,
|
||||
input: input,
|
||||
output: bridge,
|
||||
responses: responses,
|
||||
buffer: newProduceSet(p),
|
||||
currentRetries: make(map[string]map[int32]error),
|
||||
}
|
||||
go withRecover(bp.run)
|
||||
|
||||
// minimal bridge to make the network response `select`able
|
||||
go withRecover(func() {
|
||||
for set := range bridge {
|
||||
request := set.buildRequest()
|
||||
|
||||
response, err := broker.Produce(request)
|
||||
|
||||
responses <- &brokerProducerResponse{
|
||||
set: set,
|
||||
err: err,
|
||||
res: response,
|
||||
}
|
||||
}
|
||||
close(responses)
|
||||
})
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
type brokerProducerResponse struct {
|
||||
set *produceSet
|
||||
err error
|
||||
res *ProduceResponse
|
||||
}
|
||||
|
||||
// groups messages together into appropriately-sized batches for sending to the broker
|
||||
// handles state related to retries etc
|
||||
type brokerProducer struct {
|
||||
parent *asyncProducer
|
||||
broker *Broker
|
||||
|
||||
input <-chan *ProducerMessage
|
||||
output chan<- *produceSet
|
||||
responses <-chan *brokerProducerResponse
|
||||
|
||||
buffer *produceSet
|
||||
timer <-chan time.Time
|
||||
timerFired bool
|
||||
|
||||
closing error
|
||||
currentRetries map[string]map[int32]error
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) run() {
|
||||
var output chan<- *produceSet
|
||||
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-bp.input:
|
||||
if msg == nil {
|
||||
bp.shutdown()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.flags&syn == syn {
|
||||
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
|
||||
bp.broker.ID(), msg.Topic, msg.Partition)
|
||||
if bp.currentRetries[msg.Topic] == nil {
|
||||
bp.currentRetries[msg.Topic] = make(map[int32]error)
|
||||
}
|
||||
bp.currentRetries[msg.Topic][msg.Partition] = nil
|
||||
bp.parent.inFlight.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
if reason := bp.needsRetry(msg); reason != nil {
|
||||
bp.parent.retryMessage(msg, reason)
|
||||
|
||||
if bp.closing == nil && msg.flags&fin == fin {
|
||||
// we were retrying this partition but we can start processing again
|
||||
delete(bp.currentRetries[msg.Topic], msg.Partition)
|
||||
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
|
||||
bp.broker.ID(), msg.Topic, msg.Partition)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if bp.buffer.wouldOverflow(msg) {
|
||||
if err := bp.waitForSpace(msg); err != nil {
|
||||
bp.parent.retryMessage(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err := bp.buffer.add(msg); err != nil {
|
||||
bp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
|
||||
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
|
||||
}
|
||||
case <-bp.timer:
|
||||
bp.timerFired = true
|
||||
case output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
|
||||
if bp.timerFired || bp.buffer.readyToFlush() {
|
||||
output = bp.output
|
||||
} else {
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) shutdown() {
|
||||
for !bp.buffer.empty() {
|
||||
select {
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
case bp.output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
}
|
||||
}
|
||||
close(bp.output)
|
||||
for response := range bp.responses {
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
|
||||
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
|
||||
if bp.closing != nil {
|
||||
return bp.closing
|
||||
}
|
||||
|
||||
return bp.currentRetries[msg.Topic][msg.Partition]
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
|
||||
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
|
||||
|
||||
for {
|
||||
select {
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
// handling a response can change our state, so re-check some things
|
||||
if reason := bp.needsRetry(msg); reason != nil {
|
||||
return reason
|
||||
} else if !bp.buffer.wouldOverflow(msg) {
|
||||
return nil
|
||||
}
|
||||
case bp.output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) rollOver() {
|
||||
bp.timer = nil
|
||||
bp.timerFired = false
|
||||
bp.buffer = newProduceSet(bp.parent)
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
	if response.err != nil {
		bp.handleError(response.set, response.err)
	} else {
		bp.handleSuccess(response.set, response.res)
	}

	if bp.buffer.empty() {
		bp.rollOver() // this can happen if the response invalidated our buffer
	}
}

func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
	// we iterate through the blocks in the request set, not the response, so that we notice
	// if the response is missing a block completely
	sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
		if response == nil {
			// this only happens when RequiredAcks is NoResponse, so we have to assume success
			bp.parent.returnSuccesses(msgs)
			return
		}

		block := response.GetBlock(topic, partition)
		if block == nil {
			bp.parent.returnErrors(msgs, ErrIncompleteResponse)
			return
		}

		switch block.Err {
		// Success
		case ErrNoError:
			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
				for _, msg := range msgs {
					msg.Timestamp = block.Timestamp
				}
			}
			for i, msg := range msgs {
				msg.Offset = block.Offset + int64(i)
			}
			bp.parent.returnSuccesses(msgs)
		// Retriable errors
		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
			Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
				bp.broker.ID(), topic, partition, block.Err)
			bp.currentRetries[topic][partition] = block.Err
			bp.parent.retryMessages(msgs, block.Err)
			bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
		// Other non-retriable errors
		default:
			bp.parent.returnErrors(msgs, block.Err)
		}
	})
}

func (bp *brokerProducer) handleError(sent *produceSet, err error) {
	switch err.(type) {
	case PacketEncodingError:
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.returnErrors(msgs, err)
		})
	default:
		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
		bp.parent.abandonBrokerConnection(bp.broker)
		_ = bp.broker.Close()
		bp.closing = err
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.rollOver()
	}
}

// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
	var msg *ProducerMessage
	buf := queue.New()

	for {
		if buf.Length() == 0 {
			msg = <-p.retries
		} else {
			select {
			case msg = <-p.retries:
			case p.input <- buf.Peek().(*ProducerMessage):
				buf.Remove()
				continue
			}
		}

		if msg == nil {
			return
		}

		buf.Add(msg)
	}
}

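// Editor's illustrative sketch (not part of sarama): the unbounded-buffer
// "bridge" pattern used by retryHandler above, shown in isolation for a plain
// string channel. The bridge, in, and out names are hypothetical; the point is
// that the in-memory queue decouples the sender from a possibly blocked
// receiver, which is how the dispatcher/flusher deadlock is avoided.
func bridge(in <-chan string, out chan<- string) {
	buf := queue.New() // github.com/eapache/queue, vendored in this commit

	for {
		if buf.Length() == 0 {
			// nothing buffered: only receiving can make progress
			v, ok := <-in
			if !ok {
				return
			}
			buf.Add(v)
			continue
		}
		select {
		case v, ok := <-in:
			if !ok {
				return
			}
			buf.Add(v)
		case out <- buf.Peek().(string):
			buf.Remove()
		}
	}
}
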
// utility functions

func (p *asyncProducer) shutdown() {
	Logger.Println("Producer shutting down.")
	p.inFlight.Add(1)
	p.input <- &ProducerMessage{flags: shutdown}

	p.inFlight.Wait()

	if p.ownClient {
		err := p.client.Close()
		if err != nil {
			Logger.Println("producer/shutdown failed to close the embedded client:", err)
		}
	}

	close(p.input)
	close(p.retries)
	close(p.errors)
	close(p.successes)
}

func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
	msg.clear()
	pErr := &ProducerError{Msg: msg, Err: err}
	if p.conf.Producer.Return.Errors {
		p.errors <- pErr
	} else {
		Logger.Println(pErr)
	}
	p.inFlight.Done()
}

func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.returnError(msg, err)
	}
}

func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
	for _, msg := range batch {
		if p.conf.Producer.Return.Successes {
			msg.clear()
			p.successes <- msg
		}
		p.inFlight.Done()
	}
}

func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
	if msg.retries >= p.conf.Producer.Retry.Max {
		p.returnError(msg, err)
	} else {
		msg.retries++
		p.retries <- msg
	}
}

func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.retryMessage(msg, err)
	}
}

func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	bp := p.brokers[broker]

	if bp == nil {
		bp = p.newBrokerProducer(broker)
		p.brokers[broker] = bp
		p.brokerRefs[bp] = 0
	}

	p.brokerRefs[bp]++

	return bp
}

func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	p.brokerRefs[bp]--
	if p.brokerRefs[bp] == 0 {
		close(bp)
		delete(p.brokerRefs, bp)

		if p.brokers[broker] == bp {
			delete(p.brokers, broker)
		}
	}
}

func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	delete(p.brokers, broker)
}
841
vendor/src/github.com/Shopify/sarama/async_producer_test.go
vendored
Normal file
@@ -0,0 +1,841 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const TestMessage = "ABC THE MESSAGE"
|
||||
|
||||
func closeProducer(t *testing.T, p AsyncProducer) {
|
||||
var wg sync.WaitGroup
|
||||
p.AsyncClose()
|
||||
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
for _ = range p.Successes() {
|
||||
t.Error("Unexpected message on Successes()")
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
for msg := range p.Errors() {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
|
||||
expect := successes + errors
|
||||
for expect > 0 {
|
||||
select {
|
||||
case msg := <-p.Errors():
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
errors--
|
||||
expect--
|
||||
if errors < 0 {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
case msg := <-p.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
successes--
|
||||
expect--
|
||||
if successes < 0 {
|
||||
t.Error("Too many successes")
|
||||
}
|
||||
}
|
||||
}
|
||||
if successes != 0 || errors != 0 {
|
||||
t.Error("Unexpected successes", successes, "or errors", errors)
|
||||
}
|
||||
}
|
||||
|
||||
type testPartitioner chan *int32
|
||||
|
||||
func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
part := <-p
|
||||
if part == nil {
|
||||
return 0, errors.New("BOOM")
|
||||
}
|
||||
|
||||
return *part, nil
|
||||
}
|
||||
|
||||
func (p testPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p testPartitioner) feed(partition int32) {
|
||||
p <- &partition
|
||||
}
|
||||
|
||||
type flakyEncoder bool
|
||||
|
||||
func (f flakyEncoder) Length() int {
|
||||
return len(TestMessage)
|
||||
}
|
||||
|
||||
func (f flakyEncoder) Encode() ([]byte, error) {
|
||||
if !bool(f) {
|
||||
return nil, errors.New("flaky encoding error")
|
||||
}
|
||||
return []byte(TestMessage), nil
|
||||
}
|
||||
|
||||
func TestAsyncProducer(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case msg := <-producer.Errors():
|
||||
t.Error(msg.Err)
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
case msg := <-producer.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
if msg.Metadata.(int) != i {
|
||||
t.Error("Message metadata did not match")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleFlushes(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for flush := 0; flush < 3; flush++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 5, 0)
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleBrokers(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader0 := NewMockBroker(t, 2)
|
||||
leader1 := NewMockBroker(t, 3)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
|
||||
metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse0 := new(ProduceResponse)
|
||||
prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader0.Returns(prodResponse0)
|
||||
|
||||
prodResponse1 := new(ProduceResponse)
|
||||
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
|
||||
leader1.Returns(prodResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = NewRoundRobinPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader1.Close()
|
||||
leader0.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerCustomPartitioner(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse := new(ProduceResponse)
|
||||
prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 2
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = func(topic string) Partitioner {
|
||||
p := make(testPartitioner)
|
||||
go func() {
|
||||
p.feed(0)
|
||||
p <- nil
|
||||
p <- nil
|
||||
p <- nil
|
||||
p.feed(0)
|
||||
}()
|
||||
return p
|
||||
}
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 2, 3)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerFailureRetry(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
seedBroker.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
leader1.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
leader1.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerEncoderFailures(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 1
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = NewManualPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for flush := 0; flush < 3; flush++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)}
|
||||
expectResults(t, producer, 1, 2)
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
// If a Kafka broker becomes unavailable and then returns back in service, then
|
||||
// producer reconnects to it and continues sending messages.
|
||||
func TestAsyncProducerBrokerBounce(t *testing.T) {
|
||||
// Given
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 1
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// When: a broker connection gets reset by a broker (network glitch, restart, you name it).
|
||||
leader.Close() // producer should get EOF
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
|
||||
seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
|
||||
|
||||
// Then: a produced message goes through the new broker connection.
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 3
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader1.Close() // producer should get EOF
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
|
||||
// ok fine, tell it to go to leader2 finally
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
seedBroker.Close()
|
||||
leader2.Close()
|
||||
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleRetries(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader1 := NewMockBroker(t, 2)
|
||||
leader2 := NewMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 4
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
leader2.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
seedBroker.Close()
|
||||
leader1.Close()
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerOutOfRetries(t *testing.T) {
|
||||
t.Skip("Enable once bug #294 is fixed.")
|
||||
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case msg := <-producer.Errors():
|
||||
if msg.Err != ErrNotLeaderForPartition {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
case <-producer.Successes():
|
||||
t.Error("Unexpected success")
|
||||
}
|
||||
}
|
||||
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 1
|
||||
config.Producer.Partitioner = NewRoundRobinPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// prime partition 0
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// prime partition 1
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
prodSuccess = new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// reboot the broker (the producer will get EOF on its existing connection)
|
||||
leader.Close()
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr)
|
||||
|
||||
// send another message on partition 0 to trigger the EOF and retry
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
|
||||
// tell partition 0 to go to that broker again
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// succeed this time
|
||||
prodSuccess = new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// shutdown
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
config.Producer.Retry.Max = 1
|
||||
config.Producer.Partitioner = NewManualPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// prime partitions
|
||||
for p := int32(0); p < 2; p++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
|
||||
}
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 5, 0)
|
||||
}
|
||||
|
||||
// send more messages on partition 0
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
leader.SetHandlerByMap(map[string]MockResponse{
|
||||
"ProduceRequest": NewMockProduceResponse(t).
|
||||
SetError("my_topic", 0, ErrNoError),
|
||||
})
|
||||
|
||||
// tell partition 0 to go to that broker again
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// succeed this time
|
||||
expectResults(t, producer, 5, 0)
|
||||
|
||||
// put five more through
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
|
||||
}
|
||||
expectResults(t, producer, 5, 0)
|
||||
|
||||
// shutdown
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerRetryShutdown(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataLeader := new(MetadataResponse)
|
||||
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
producer.AsyncClose()
|
||||
time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in
|
||||
|
||||
producer.Input() <- &ProducerMessage{Topic: "FOO"}
|
||||
if err := <-producer.Errors(); err.Err != ErrShuttingDown {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader.Returns(prodNotLeader)
|
||||
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
|
||||
// wait for the async-closed producer to shut down fully
|
||||
for err := range producer.Errors() {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAsyncProducerNoReturns(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
|
||||
metadataLeader := new(MetadataResponse)
|
||||
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = false
|
||||
config.Producer.Return.Errors = false
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
|
||||
wait := make(chan bool)
|
||||
go func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
close(wait)
|
||||
}()
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
<-wait
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
// This example shows how to use the producer while simultaneously
|
||||
// reading the Errors channel to know about any failures.
|
||||
func ExampleAsyncProducer_select() {
|
||||
producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := producer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Trap SIGINT to trigger a shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
var enqueued, errors int
|
||||
ProducerLoop:
|
||||
for {
|
||||
select {
|
||||
case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
|
||||
enqueued++
|
||||
case err := <-producer.Errors():
|
||||
log.Println("Failed to produce message", err)
|
||||
errors++
|
||||
case <-signals:
|
||||
break ProducerLoop
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
|
||||
}
|
||||
|
||||
// This example shows how to use the producer with separate goroutines
|
||||
// reading from the Successes and Errors channels. Note that in order
|
||||
// for the Successes channel to be populated, you have to set
|
||||
// config.Producer.Return.Successes to true.
|
||||
func ExampleAsyncProducer_goroutines() {
|
||||
config := NewConfig()
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Trap SIGINT to trigger a graceful shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
enqueued, successes, errors int
|
||||
)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for _ = range producer.Successes() {
|
||||
successes++
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for err := range producer.Errors() {
|
||||
log.Println(err)
|
||||
errors++
|
||||
}
|
||||
}()
|
||||
|
||||
ProducerLoop:
|
||||
for {
|
||||
message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
|
||||
select {
|
||||
case producer.Input() <- message:
|
||||
enqueued++
|
||||
|
||||
case <-signals:
|
||||
producer.AsyncClose() // Trigger a shutdown of the producer.
|
||||
break ProducerLoop
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
|
||||
}
|
674
vendor/src/github.com/Shopify/sarama/broker.go
vendored
Normal file
@@ -0,0 +1,674 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
|
||||
type Broker struct {
|
||||
id int32
|
||||
addr string
|
||||
|
||||
conf *Config
|
||||
correlationID int32
|
||||
conn net.Conn
|
||||
connErr error
|
||||
lock sync.Mutex
|
||||
opened int32
|
||||
|
||||
responses chan responsePromise
|
||||
done chan bool
|
||||
|
||||
incomingByteRate metrics.Meter
|
||||
requestRate metrics.Meter
|
||||
requestSize metrics.Histogram
|
||||
requestLatency metrics.Histogram
|
||||
outgoingByteRate metrics.Meter
|
||||
responseRate metrics.Meter
|
||||
responseSize metrics.Histogram
|
||||
brokerIncomingByteRate metrics.Meter
|
||||
brokerRequestRate metrics.Meter
|
||||
brokerRequestSize metrics.Histogram
|
||||
brokerRequestLatency metrics.Histogram
|
||||
brokerOutgoingByteRate metrics.Meter
|
||||
brokerResponseRate metrics.Meter
|
||||
brokerResponseSize metrics.Histogram
|
||||
}
|
||||
|
||||
type responsePromise struct {
|
||||
requestTime time.Time
|
||||
correlationID int32
|
||||
packets chan []byte
|
||||
errors chan error
|
||||
}
|
||||
|
||||
// NewBroker creates and returns a Broker targeting the given host:port address.
|
||||
// This does not attempt to actually connect, you have to call Open() for that.
|
||||
func NewBroker(addr string) *Broker {
|
||||
return &Broker{id: -1, addr: addr}
|
||||
}
|
||||
|
||||
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
|
||||
// waiting for the connection to complete. This means that any subsequent operations on the broker will
|
||||
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
|
||||
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
|
||||
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
|
||||
func (b *Broker) Open(conf *Config) error {
|
||||
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
|
||||
return ErrAlreadyConnected
|
||||
}
|
||||
|
||||
if conf == nil {
|
||||
conf = NewConfig()
|
||||
}
|
||||
|
||||
err := conf.Validate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
|
||||
go withRecover(func() {
|
||||
defer b.lock.Unlock()
|
||||
|
||||
dialer := net.Dialer{
|
||||
Timeout: conf.Net.DialTimeout,
|
||||
KeepAlive: conf.Net.KeepAlive,
|
||||
}
|
||||
|
||||
if conf.Net.TLS.Enable {
|
||||
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
|
||||
} else {
|
||||
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
|
||||
}
|
||||
if b.connErr != nil {
|
||||
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
|
||||
b.conn = nil
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
return
|
||||
}
|
||||
b.conn = newBufConn(b.conn)
|
||||
|
||||
b.conf = conf
|
||||
|
||||
// Create or reuse the global metrics shared between brokers
|
||||
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
|
||||
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
|
||||
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
|
||||
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
|
||||
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
|
||||
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
|
||||
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
|
||||
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
|
||||
// the same id (-1) and are already exposed through the global metrics above
|
||||
if b.id >= 0 {
|
||||
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
|
||||
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
|
||||
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
|
||||
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
|
||||
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
|
||||
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
|
||||
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
|
||||
}
|
||||
|
||||
if conf.Net.SASL.Enable {
|
||||
b.connErr = b.sendAndReceiveSASLPlainAuth()
|
||||
if b.connErr != nil {
|
||||
err = b.conn.Close()
|
||||
if err == nil {
|
||||
Logger.Printf("Closed connection to broker %s\n", b.addr)
|
||||
} else {
|
||||
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
|
||||
}
|
||||
b.conn = nil
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
b.done = make(chan bool)
|
||||
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
|
||||
|
||||
if b.id >= 0 {
|
||||
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
|
||||
} else {
|
||||
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
|
||||
}
|
||||
go withRecover(b.responseReceiver)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
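// Editor's note (illustrative, not part of sarama): the "fully synchronous Open"
// pattern described in the comment on Open looks roughly like this; the broker
// address is a placeholder.
//
//	b := NewBroker("localhost:9092")
//	if err := b.Open(nil); err != nil {
//		// configuration error or already connected
//	}
//	if connected, err := b.Connected(); err != nil || !connected {
//		// the asynchronous dial failed; err carries the connection error
//	}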
|
||||
|
||||
// Connected returns true if the broker is connected and false otherwise. If the broker is not
|
||||
// connected but it had tried to connect, the error from that connection attempt is also returned.
|
||||
func (b *Broker) Connected() (bool, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
return b.conn != nil, b.connErr
|
||||
}
|
||||
|
||||
func (b *Broker) Close() error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.conn == nil {
|
||||
return ErrNotConnected
|
||||
}
|
||||
|
||||
close(b.responses)
|
||||
<-b.done
|
||||
|
||||
err := b.conn.Close()
|
||||
|
||||
b.conn = nil
|
||||
b.connErr = nil
|
||||
b.done = nil
|
||||
b.responses = nil
|
||||
|
||||
if err == nil {
|
||||
Logger.Printf("Closed connection to broker %s\n", b.addr)
|
||||
} else {
|
||||
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&b.opened, 0)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
|
||||
func (b *Broker) ID() int32 {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
|
||||
func (b *Broker) Addr() string {
|
||||
return b.addr
|
||||
}
|
||||
|
||||
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
|
||||
response := new(MetadataResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
|
||||
response := new(ConsumerMetadataResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
|
||||
response := new(OffsetResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
|
||||
var response *ProduceResponse
|
||||
var err error
|
||||
|
||||
if request.RequiredAcks == NoResponse {
|
||||
err = b.sendAndReceive(request, nil)
|
||||
} else {
|
||||
response = new(ProduceResponse)
|
||||
err = b.sendAndReceive(request, response)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
|
||||
response := new(FetchResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
|
||||
response := new(OffsetCommitResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
|
||||
response := new(OffsetFetchResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
|
||||
response := new(JoinGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
|
||||
response := new(SyncGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
|
||||
response := new(LeaveGroupResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
|
||||
response := new(HeartbeatResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
|
||||
response := new(ListGroupsResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
|
||||
response := new(DescribeGroupsResponse)
|
||||
|
||||
err := b.sendAndReceive(request, response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.conn == nil {
|
||||
if b.connErr != nil {
|
||||
return nil, b.connErr
|
||||
}
|
||||
return nil, ErrNotConnected
|
||||
}
|
||||
|
||||
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
|
||||
return nil, ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
|
||||
buf, err := encode(req, b.conf.MetricRegistry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytes, err := b.conn.Write(buf)
|
||||
b.updateOutgoingCommunicationMetrics(bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.correlationID++
|
||||
|
||||
if !promiseResponse {
|
||||
// Record request latency without the response
|
||||
b.updateRequestLatencyMetrics(time.Since(requestTime))
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
|
||||
b.responses <- promise
|
||||
|
||||
return &promise, nil
|
||||
}
|
||||
|
||||
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
|
||||
promise, err := b.send(req, res != nil)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if promise == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case buf := <-promise.packets:
|
||||
return versionedDecode(buf, res, req.version())
|
||||
case err = <-promise.errors:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) decode(pd packetDecoder) (err error) {
|
||||
b.id, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
|
||||
if _, _, err := net.SplitHostPort(b.addr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) encode(pe packetEncoder) (err error) {
|
||||
|
||||
host, portstr, err := net.SplitHostPort(b.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.Atoi(portstr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(b.id)
|
||||
|
||||
err = pe.putString(host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(int32(port))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) responseReceiver() {
|
||||
var dead error
|
||||
header := make([]byte, 8)
|
||||
for response := range b.responses {
|
||||
if dead != nil {
|
||||
response.errors <- dead
|
||||
continue
|
||||
}
|
||||
|
||||
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
|
||||
if err != nil {
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
bytesReadHeader, err := io.ReadFull(b.conn, header)
|
||||
requestLatency := time.Since(response.requestTime)
|
||||
if err != nil {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
decodedHeader := responseHeader{}
|
||||
err = decode(header, &decodedHeader)
|
||||
if err != nil {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
if decodedHeader.correlationID != response.correlationID {
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
|
||||
// TODO if decoded ID < cur ID, discard until we catch up
|
||||
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
|
||||
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
|
||||
response.errors <- dead
|
||||
continue
|
||||
}
|
||||
|
||||
buf := make([]byte, decodedHeader.length-4)
|
||||
bytesReadBody, err := io.ReadFull(b.conn, buf)
|
||||
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
|
||||
if err != nil {
|
||||
dead = err
|
||||
response.errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
response.packets <- buf
|
||||
}
|
||||
close(b.done)
|
||||
}
|
||||
|
||||
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
|
||||
rb := &SaslHandshakeRequest{"PLAIN"}
|
||||
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
|
||||
buf, err := encode(req, b.conf.MetricRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytes, err := b.conn.Write(buf)
|
||||
b.updateOutgoingCommunicationMetrics(bytes)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
b.correlationID++
|
||||
//wait for the response
|
||||
header := make([]byte, 8) // response header
|
||||
_, err = io.ReadFull(b.conn, header)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
length := binary.BigEndian.Uint32(header[:4])
|
||||
payload := make([]byte, length-4)
|
||||
n, err := io.ReadFull(b.conn, payload)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
|
||||
res := &SaslHandshakeResponse{}
|
||||
err = versionedDecode(payload, res, 0)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
|
||||
return err
|
||||
}
|
||||
if res.Err != ErrNoError {
|
||||
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
|
||||
return res.Err
|
||||
}
|
||||
Logger.Print("Successful SASL handshake")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
|
||||
// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
|
||||
//
|
||||
// In SASL Plain, Kafka expects the auth header to be in the following format
|
||||
// Message format (from https://tools.ietf.org/html/rfc4616):
|
||||
//
|
||||
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
|
||||
// authcid = 1*SAFE ; MUST accept up to 255 octets
|
||||
// authzid = 1*SAFE ; MUST accept up to 255 octets
|
||||
// passwd = 1*SAFE ; MUST accept up to 255 octets
|
||||
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
|
||||
//
|
||||
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
|
||||
// ;; any UTF-8 encoded Unicode character except NUL
|
||||
//
|
||||
// When credentials are valid, Kafka returns a 4 byte array of null characters.
|
||||
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
|
||||
// of responding to bad credentials, but that's how it's being done today.
|
||||
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
|
||||
if b.conf.Net.SASL.Handshake {
|
||||
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
|
||||
if handshakeErr != nil {
|
||||
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
|
||||
return handshakeErr
|
||||
}
|
||||
}
|
||||
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
|
||||
authBytes := make([]byte, length+4) //4 byte length header + auth data
|
||||
binary.BigEndian.PutUint32(authBytes, uint32(length))
|
||||
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
|
||||
|
||||
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
requestTime := time.Now()
|
||||
bytesWritten, err := b.conn.Write(authBytes)
|
||||
b.updateOutgoingCommunicationMetrics(bytesWritten)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
header := make([]byte, 4)
|
||||
n, err := io.ReadFull(b.conn, header)
|
||||
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
|
||||
// If the credentials are valid, we would get a 4 byte response filled with null characters.
|
||||
// Otherwise, the broker closes the connection and we get an EOF
|
||||
if err != nil {
|
||||
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
|
||||
return nil
|
||||
}
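// Editor's illustrative sketch (not part of sarama): assembling the SASL/PLAIN
// payload in the wire format documented above, assuming an empty authzid and
// the 4-byte big-endian length prefix that sendAndReceiveSASLPlainAuth writes.
// The function name is hypothetical.
func buildSASLPlainPayload(user, password string) []byte {
	auth := []byte("\x00" + user + "\x00" + password) // [authzid] NUL authcid NUL passwd
	buf := make([]byte, 4+len(auth))
	binary.BigEndian.PutUint32(buf, uint32(len(auth))) // length prefix excludes itself
	copy(buf[4:], auth)
	return buf
}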
|
||||
|
||||
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
|
||||
b.updateRequestLatencyMetrics(requestLatency)
|
||||
b.responseRate.Mark(1)
|
||||
if b.brokerResponseRate != nil {
|
||||
b.brokerResponseRate.Mark(1)
|
||||
}
|
||||
responseSize := int64(bytes)
|
||||
b.incomingByteRate.Mark(responseSize)
|
||||
if b.brokerIncomingByteRate != nil {
|
||||
b.brokerIncomingByteRate.Mark(responseSize)
|
||||
}
|
||||
b.responseSize.Update(responseSize)
|
||||
if b.brokerResponseSize != nil {
|
||||
b.brokerResponseSize.Update(responseSize)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
|
||||
requestLatencyInMs := int64(requestLatency / time.Millisecond)
|
||||
b.requestLatency.Update(requestLatencyInMs)
|
||||
if b.brokerRequestLatency != nil {
|
||||
b.brokerRequestLatency.Update(requestLatencyInMs)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
|
||||
b.requestRate.Mark(1)
|
||||
if b.brokerRequestRate != nil {
|
||||
b.brokerRequestRate.Mark(1)
|
||||
}
|
||||
requestSize := int64(bytes)
|
||||
b.outgoingByteRate.Mark(requestSize)
|
||||
if b.brokerOutgoingByteRate != nil {
|
||||
b.brokerOutgoingByteRate.Mark(requestSize)
|
||||
}
|
||||
b.requestSize.Update(requestSize)
|
||||
if b.brokerRequestSize != nil {
|
||||
b.brokerRequestSize.Update(requestSize)
|
||||
}
|
||||
}
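
// Illustrative sketch, not part of Sarama: reading back one of the meters that the helpers
// above update, assuming go-metrics is imported as "metrics" as elsewhere in this file.
func logBrokerRequestRate(b *Broker) {
	m := metrics.GetOrRegisterMeter("request-rate", b.conf.MetricRegistry)
	Logger.Printf("requests sent: %d (1-minute rate %.2f/s)", m.Count(), m.Rate1())
}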
|
315
vendor/src/github.com/Shopify/sarama/broker_test.go
vendored
Normal file
@@ -0,0 +1,315 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ExampleBroker() {
|
||||
broker := NewBroker("localhost:9092")
|
||||
err := broker.Open(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
request := MetadataRequest{Topics: []string{"myTopic"}}
|
||||
response, err := broker.GetMetadata(&request)
|
||||
if err != nil {
|
||||
_ = broker.Close()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
|
||||
|
||||
if err = broker.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type mockEncoder struct {
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
func (m mockEncoder) encode(pe packetEncoder) error {
|
||||
return pe.putRawBytes(m.bytes)
|
||||
}
|
||||
|
||||
type brokerMetrics struct {
|
||||
bytesRead int
|
||||
bytesWritten int
|
||||
}
|
||||
|
||||
func TestBrokerAccessors(t *testing.T) {
|
||||
broker := NewBroker("abc:123")
|
||||
|
||||
if broker.ID() != -1 {
|
||||
t.Error("New broker didn't have an ID of -1.")
|
||||
}
|
||||
|
||||
if broker.Addr() != "abc:123" {
|
||||
t.Error("New broker didn't have the correct address")
|
||||
}
|
||||
|
||||
broker.id = 34
|
||||
if broker.ID() != 34 {
|
||||
t.Error("Manually setting broker ID did not take effect.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleBrokerCommunication(t *testing.T) {
|
||||
for _, tt := range brokerTestTable {
|
||||
Logger.Printf("Testing broker communication for %s", tt.name)
|
||||
mb := NewMockBroker(t, 0)
|
||||
mb.Returns(&mockEncoder{tt.response})
|
||||
pendingNotify := make(chan brokerMetrics)
|
||||
// Register a callback to be notified about successful requests
|
||||
mb.SetNotifier(func(bytesRead, bytesWritten int) {
|
||||
pendingNotify <- brokerMetrics{bytesRead, bytesWritten}
|
||||
})
|
||||
broker := NewBroker(mb.Addr())
|
||||
// Set the broker id in order to validate local broker metrics
|
||||
broker.id = 0
|
||||
conf := NewConfig()
|
||||
conf.Version = V0_10_0_0
|
||||
err := broker.Open(conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tt.runner(t, broker)
|
||||
err = broker.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
// Wait up to 500 ms for the remote broker to process the request and
|
||||
// notify us about the metrics
|
||||
timeout := 500 * time.Millisecond
|
||||
select {
|
||||
case mockBrokerMetrics := <-pendingNotify:
|
||||
validateBrokerMetrics(t, broker, mockBrokerMetrics)
|
||||
case <-time.After(timeout):
|
||||
t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
|
||||
}
|
||||
mb.Close()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
|
||||
var brokerTestTable = []struct {
|
||||
name string
|
||||
response []byte
|
||||
runner func(*testing.T, *Broker)
|
||||
}{
|
||||
{"MetadataRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := MetadataRequest{}
|
||||
response, err := broker.GetMetadata(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Metadata request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ConsumerMetadataRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ConsumerMetadataRequest{}
|
||||
response, err := broker.GetConsumerMetadata(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Consumer Metadata request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ProduceRequest (NoResponse)",
|
||||
[]byte{},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ProduceRequest{}
|
||||
request.RequiredAcks = NoResponse
|
||||
response, err := broker.Produce(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response != nil {
|
||||
t.Error("Produce request with NoResponse got a response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ProduceRequest (WaitForLocal)",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ProduceRequest{}
|
||||
request.RequiredAcks = WaitForLocal
|
||||
response, err := broker.Produce(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Produce request without NoResponse got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"FetchRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := FetchRequest{}
|
||||
response, err := broker.Fetch(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Fetch request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetFetchRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetFetchRequest{}
|
||||
response, err := broker.FetchOffset(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("OffsetFetch request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetCommitRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetCommitRequest{}
|
||||
response, err := broker.CommitOffset(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("OffsetCommit request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"OffsetRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := OffsetRequest{}
|
||||
response, err := broker.GetAvailableOffsets(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Offset request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"JoinGroupRequest",
|
||||
[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := JoinGroupRequest{}
|
||||
response, err := broker.JoinGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("JoinGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"SyncGroupRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := SyncGroupRequest{}
|
||||
response, err := broker.SyncGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("SyncGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"LeaveGroupRequest",
|
||||
[]byte{0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := LeaveGroupRequest{}
|
||||
response, err := broker.LeaveGroup(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("LeaveGroup request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"HeartbeatRequest",
|
||||
[]byte{0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := HeartbeatRequest{}
|
||||
response, err := broker.Heartbeat(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("Heartbeat request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"ListGroupsRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := ListGroupsRequest{}
|
||||
response, err := broker.ListGroups(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("ListGroups request got no response!")
|
||||
}
|
||||
}},
|
||||
|
||||
{"DescribeGroupsRequest",
|
||||
[]byte{0x00, 0x00, 0x00, 0x00},
|
||||
func(t *testing.T, broker *Broker) {
|
||||
request := DescribeGroupsRequest{}
|
||||
response, err := broker.DescribeGroups(&request)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if response == nil {
|
||||
t.Error("DescribeGroups request got no response!")
|
||||
}
|
||||
}},
|
||||
}
|
||||
|
||||
func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
|
||||
metricValidators := newMetricValidators()
|
||||
mockBrokerBytesRead := mockBrokerMetrics.bytesRead
|
||||
mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten
|
||||
|
||||
// Check that the number of bytes sent corresponds to what the mock broker received
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten))
|
||||
if mockBrokerBytesWritten == 0 {
|
||||
// This is a ProduceRequest with NoResponse
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 0))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0))
|
||||
} else {
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten))
|
||||
}
|
||||
|
||||
// Check that the number of bytes received corresponds to what the mock broker sent
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead))
|
||||
metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1))
|
||||
metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1))
|
||||
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead))
|
||||
|
||||
// Run the validators
|
||||
metricValidators.run(t, broker.conf.MetricRegistry)
|
||||
}
|
749
vendor/src/github.com/Shopify/sarama/client.go
vendored
Normal file
@@ -0,0 +1,749 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
|
||||
// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
|
||||
// automatically when it passes out of scope. It is safe to share a client amongst many
|
||||
// users, however Kafka will process requests from a single client strictly in serial,
|
||||
// so it is generally more efficient to use the default one client per producer/consumer.
|
||||
type Client interface {
|
||||
// Config returns the Config struct of the client. This struct should not be
|
||||
// altered after it has been created.
|
||||
Config() *Config
|
||||
|
||||
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
|
||||
Brokers() []*Broker
|
||||
|
||||
// Topics returns the set of available topics as retrieved from cluster metadata.
|
||||
Topics() ([]string, error)
|
||||
|
||||
// Partitions returns the sorted list of all partition IDs for the given topic.
|
||||
Partitions(topic string) ([]int32, error)
|
||||
|
||||
// WritablePartitions returns the sorted list of all writable partition IDs for
|
||||
// the given topic, where "writable" means "having a valid leader accepting
|
||||
// writes".
|
||||
WritablePartitions(topic string) ([]int32, error)
|
||||
|
||||
// Leader returns the broker object that is the leader of the current
|
||||
// topic/partition, as determined by querying the cluster metadata.
|
||||
Leader(topic string, partitionID int32) (*Broker, error)
|
||||
|
||||
// Replicas returns the set of all replica IDs for the given partition.
|
||||
Replicas(topic string, partitionID int32) ([]int32, error)
|
||||
|
||||
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
|
||||
// available metadata for those topics. If no topics are provided, it will refresh
|
||||
// metadata for all topics.
|
||||
RefreshMetadata(topics ...string) error
|
||||
|
||||
// GetOffset queries the cluster to get the most recent available offset at the
|
||||
// given time on the topic/partition combination. Time should be OffsetOldest for
|
||||
// the earliest available offset, OffsetNewest for the offset of the message that
|
||||
// will be produced next, or a time.
|
||||
GetOffset(topic string, partitionID int32, time int64) (int64, error)
|
||||
|
||||
// Coordinator returns the coordinating broker for a consumer group. It will
|
||||
// return a locally cached value if it's available. You can call
|
||||
// RefreshCoordinator to update the cached value. This function only works on
|
||||
// Kafka 0.8.2 and higher.
|
||||
Coordinator(consumerGroup string) (*Broker, error)
|
||||
|
||||
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
|
||||
// in local cache. This function only works on Kafka 0.8.2 and higher.
|
||||
RefreshCoordinator(consumerGroup string) error
|
||||
|
||||
// Close shuts down all broker connections managed by this client. It is required
|
||||
// to call this function before a client object passes out of scope, as it will
|
||||
// otherwise leak memory. You must close any Producers or Consumers using a client
|
||||
// before you close the client.
|
||||
Close() error
|
||||
|
||||
// Closed returns true if the client has already had Close called on it
|
||||
Closed() bool
|
||||
}
|
||||
|
||||
const (
|
||||
// OffsetNewest stands for the log head offset, i.e. the offset that will be
|
||||
// assigned to the next message that will be produced to the partition. You
|
||||
// can send this to a client's GetOffset method to get this offset, or when
|
||||
// calling ConsumePartition to start consuming new messages.
|
||||
OffsetNewest int64 = -1
|
||||
// OffsetOldest stands for the oldest offset available on the broker for a
|
||||
// partition. You can send this to a client's GetOffset method to get this
|
||||
// offset, or when calling ConsumePartition to start consuming from the
|
||||
// oldest offset that is still available on the broker.
|
||||
OffsetOldest int64 = -2
|
||||
)
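
// Illustrative sketch, not part of Sarama: typical use of the Client interface and the offset
// constants defined above. The broker address and topic name are made-up placeholders.
func exampleClientUsage() error {
	client, err := NewClient([]string{"localhost:9092"}, NewConfig())
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }()

	partitions, err := client.Partitions("myTopic")
	if err != nil {
		return err
	}
	for _, p := range partitions {
		next, err := client.GetOffset("myTopic", p, OffsetNewest)
		if err != nil {
			return err
		}
		Logger.Printf("partition %d: next offset %d", p, next)
	}
	return nil
}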
|
||||
|
||||
type client struct {
|
||||
conf *Config
|
||||
closer, closed chan none // for shutting down background metadata updater
|
||||
|
||||
// the broker addresses given to us through the constructor are not guaranteed to be returned in
|
||||
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
|
||||
// so we store them separately
|
||||
seedBrokers []*Broker
|
||||
deadSeeds []*Broker
|
||||
|
||||
brokers map[int32]*Broker // maps broker ids to brokers
|
||||
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
|
||||
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
|
||||
|
||||
// If the number of partitions is large, we can get some churn calling cachedPartitions,
|
||||
// so the result is cached. It is important to update this value whenever metadata is changed
|
||||
cachedPartitionsResults map[string][maxPartitionIndex][]int32
|
||||
|
||||
lock sync.RWMutex // protects access to the maps that hold cluster state.
|
||||
}
|
||||
|
||||
// NewClient creates a new Client. It connects to one of the given broker addresses
|
||||
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
|
||||
// be retrieved from any of the given broker addresses, the client is not created.
|
||||
func NewClient(addrs []string, conf *Config) (Client, error) {
|
||||
Logger.Println("Initializing new client")
|
||||
|
||||
if conf == nil {
|
||||
conf = NewConfig()
|
||||
}
|
||||
|
||||
if err := conf.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(addrs) < 1 {
|
||||
return nil, ConfigurationError("You must provide at least one broker address")
|
||||
}
|
||||
|
||||
client := &client{
|
||||
conf: conf,
|
||||
closer: make(chan none),
|
||||
closed: make(chan none),
|
||||
brokers: make(map[int32]*Broker),
|
||||
metadata: make(map[string]map[int32]*PartitionMetadata),
|
||||
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
|
||||
coordinators: make(map[string]int32),
|
||||
}
|
||||
|
||||
random := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, index := range random.Perm(len(addrs)) {
|
||||
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
|
||||
}
|
||||
|
||||
// do an initial fetch of all cluster metadata by specifying an empty list of topics
|
||||
err := client.RefreshMetadata()
|
||||
switch err {
|
||||
case nil:
|
||||
break
|
||||
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
|
||||
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
|
||||
Logger.Println(err)
|
||||
default:
|
||||
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
|
||||
_ = client.Close()
|
||||
return nil, err
|
||||
}
|
||||
go withRecover(client.backgroundMetadataUpdater)
|
||||
|
||||
Logger.Println("Successfully initialized new client")
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (client *client) Config() *Config {
|
||||
return client.conf
|
||||
}
|
||||
|
||||
func (client *client) Brokers() []*Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
brokers := make([]*Broker, 0)
|
||||
for _, broker := range client.brokers {
|
||||
brokers = append(brokers, broker)
|
||||
}
|
||||
return brokers
|
||||
}
|
||||
|
||||
func (client *client) Close() error {
|
||||
if client.Closed() {
|
||||
// Chances are this is being called from a defer() and the error will go unobserved
|
||||
// so we go ahead and log the event in this case.
|
||||
Logger.Printf("Close() called on already closed client")
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
// shutdown and wait for the background thread before we take the lock, to avoid races
|
||||
close(client.closer)
|
||||
<-client.closed
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
Logger.Println("Closing Client")
|
||||
|
||||
for _, broker := range client.brokers {
|
||||
safeAsyncClose(broker)
|
||||
}
|
||||
|
||||
for _, broker := range client.seedBrokers {
|
||||
safeAsyncClose(broker)
|
||||
}
|
||||
|
||||
client.brokers = nil
|
||||
client.metadata = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) Closed() bool {
|
||||
return client.brokers == nil
|
||||
}
|
||||
|
||||
func (client *client) Topics() ([]string, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
ret := make([]string, 0, len(client.metadata))
|
||||
for topic := range client.metadata {
|
||||
ret = append(ret, topic)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (client *client) Partitions(topic string) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
partitions := client.cachedPartitions(topic, allPartitions)
|
||||
|
||||
if len(partitions) == 0 {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
partitions = client.cachedPartitions(topic, allPartitions)
|
||||
}
|
||||
|
||||
if partitions == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
func (client *client) WritablePartitions(topic string) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
partitions := client.cachedPartitions(topic, writablePartitions)
|
||||
|
||||
// len==0 catches when it's nil (no such topic) and the odd case when every single
|
||||
// partition is undergoing leader election simultaneously. Callers have to be able to handle
|
||||
// this function returning an empty slice (which is a valid return value) but catching it
|
||||
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
|
||||
// a metadata refresh as a nicety so callers can just try again and don't have to manually
|
||||
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
|
||||
if len(partitions) == 0 {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
partitions = client.cachedPartitions(topic, writablePartitions)
|
||||
}
|
||||
|
||||
if partitions == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
return partitions, nil
|
||||
}
|
||||
|
||||
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
metadata := client.cachedMetadata(topic, partitionID)
|
||||
|
||||
if metadata == nil {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadata = client.cachedMetadata(topic, partitionID)
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
if metadata.Err == ErrReplicaNotAvailable {
|
||||
return nil, metadata.Err
|
||||
}
|
||||
return dupeAndSort(metadata.Replicas), nil
|
||||
}
|
||||
|
||||
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
leader, err := client.cachedLeader(topic, partitionID)
|
||||
|
||||
if leader == nil {
|
||||
err = client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
leader, err = client.cachedLeader(topic, partitionID)
|
||||
}
|
||||
|
||||
return leader, err
|
||||
}
|
||||
|
||||
func (client *client) RefreshMetadata(topics ...string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
|
||||
// error. This handles the case by returning an error instead of sending it
|
||||
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
|
||||
for _, topic := range topics {
|
||||
if len(topic) == 0 {
|
||||
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
|
||||
}
|
||||
}
|
||||
|
||||
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
|
||||
}
|
||||
|
||||
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
|
||||
if client.Closed() {
|
||||
return -1, ErrClosedClient
|
||||
}
|
||||
|
||||
offset, err := client.getOffset(topic, partitionID, time)
|
||||
|
||||
if err != nil {
|
||||
if err := client.RefreshMetadata(topic); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return client.getOffset(topic, partitionID, time)
|
||||
}
|
||||
|
||||
return offset, err
|
||||
}
|
||||
|
||||
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
coordinator := client.cachedCoordinator(consumerGroup)
|
||||
|
||||
if coordinator == nil {
|
||||
if err := client.RefreshCoordinator(consumerGroup); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
coordinator = client.cachedCoordinator(consumerGroup)
|
||||
}
|
||||
|
||||
if coordinator == nil {
|
||||
return nil, ErrConsumerCoordinatorNotAvailable
|
||||
}
|
||||
|
||||
_ = coordinator.Open(client.conf)
|
||||
return coordinator, nil
|
||||
}
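
// Illustrative sketch, not part of Sarama: looking up a consumer group's coordinator with the
// Coordinator method above. The group name is a made-up placeholder.
func exampleCoordinatorLookup(c Client) (string, error) {
	broker, err := c.Coordinator("my-group")
	if err != nil {
		return "", err
	}
	return broker.Addr(), nil
}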
|
||||
|
||||
func (client *client) RefreshCoordinator(consumerGroup string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
client.registerBroker(response.Coordinator)
|
||||
client.coordinators[consumerGroup] = response.Coordinator.ID()
|
||||
return nil
|
||||
}
|
||||
|
||||
// private broker management helpers
|
||||
|
||||
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID is already registered under a different
// address, the old connection is closed and replaced. You must hold the write lock before calling this function.
|
||||
func (client *client) registerBroker(broker *Broker) {
|
||||
if client.brokers[broker.ID()] == nil {
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
|
||||
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
|
||||
safeAsyncClose(client.brokers[broker.ID()])
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
|
||||
}
|
||||
}
|
||||
|
||||
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
|
||||
func (client *client) deregisterBroker(broker *Broker) {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
|
||||
client.deadSeeds = append(client.deadSeeds, broker)
|
||||
client.seedBrokers = client.seedBrokers[1:]
|
||||
} else {
|
||||
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
|
||||
// but we really shouldn't have to; once that loop is made better this case can be
|
||||
// removed, and the function generally can be renamed from `deregisterBroker` to
|
||||
// `nextSeedBroker` or something
|
||||
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
|
||||
delete(client.brokers, broker.ID())
|
||||
}
|
||||
}
|
||||
|
||||
func (client *client) resurrectDeadBrokers() {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
|
||||
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
|
||||
client.deadSeeds = nil
|
||||
}
|
||||
|
||||
func (client *client) any() *Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
if len(client.seedBrokers) > 0 {
|
||||
_ = client.seedBrokers[0].Open(client.conf)
|
||||
return client.seedBrokers[0]
|
||||
}
|
||||
|
||||
// not guaranteed to be random *or* deterministic
|
||||
for _, broker := range client.brokers {
|
||||
_ = broker.Open(client.conf)
|
||||
return broker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// private caching/lazy metadata helpers
|
||||
|
||||
type partitionType int
|
||||
|
||||
const (
|
||||
allPartitions partitionType = iota
|
||||
writablePartitions
|
||||
// If you add any more types, update the partition cache in updateMetadata()
|
||||
|
||||
// Ensure this is the last partition type value
|
||||
maxPartitionIndex
|
||||
)
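
// Illustrative sketch, not part of Sarama: how the two-level partition cache keyed by these
// constants is read. Assumes metadata for "myTopic" has already been fetched.
func exampleCachedWritablePartitions(client *client) []int32 {
	client.lock.RLock()
	defer client.lock.RUnlock()
	// first key: topic name; second index: allPartitions or writablePartitions
	return client.cachedPartitionsResults["myTopic"][writablePartitions]
}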
|
||||
|
||||
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions := client.metadata[topic]
|
||||
if partitions != nil {
|
||||
return partitions[partitionID]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions, exists := client.cachedPartitionsResults[topic]
|
||||
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return partitions[partitionSet]
|
||||
}
|
||||
|
||||
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
|
||||
partitions := client.metadata[topic]
|
||||
|
||||
if partitions == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ret := make([]int32, 0, len(partitions))
|
||||
for _, partition := range partitions {
|
||||
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, partition.ID)
|
||||
}
|
||||
|
||||
sort.Sort(int32Slice(ret))
|
||||
return ret
|
||||
}
|
||||
|
||||
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
partitions := client.metadata[topic]
|
||||
if partitions != nil {
|
||||
metadata, ok := partitions[partitionID]
|
||||
if ok {
|
||||
if metadata.Err == ErrLeaderNotAvailable {
|
||||
return nil, ErrLeaderNotAvailable
|
||||
}
|
||||
b := client.brokers[metadata.Leader]
|
||||
if b == nil {
|
||||
return nil, ErrLeaderNotAvailable
|
||||
}
|
||||
_ = b.Open(client.conf)
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
|
||||
broker, err := client.Leader(topic, partitionID)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
request := &OffsetRequest{}
|
||||
if client.conf.Version.IsAtLeast(V0_10_1_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
request.AddBlock(topic, partitionID, time, 1)
|
||||
|
||||
response, err := broker.GetAvailableOffsets(request)
|
||||
if err != nil {
|
||||
_ = broker.Close()
|
||||
return -1, err
|
||||
}
|
||||
|
||||
block := response.GetBlock(topic, partitionID)
|
||||
if block == nil {
|
||||
_ = broker.Close()
|
||||
return -1, ErrIncompleteResponse
|
||||
}
|
||||
if block.Err != ErrNoError {
|
||||
return -1, block.Err
|
||||
}
|
||||
if len(block.Offsets) != 1 {
|
||||
return -1, ErrOffsetOutOfRange
|
||||
}
|
||||
|
||||
return block.Offsets[0], nil
|
||||
}
|
||||
|
||||
// core metadata update logic
|
||||
|
||||
func (client *client) backgroundMetadataUpdater() {
|
||||
defer close(client.closed)
|
||||
|
||||
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := client.RefreshMetadata(); err != nil {
|
||||
Logger.Println("Client background metadata update:", err)
|
||||
}
|
||||
case <-client.closer:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
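
// Illustrative sketch, not part of Sarama: disabling the background refresh loop above by
// setting the refresh frequency to zero before constructing a client.
func exampleNoBackgroundRefresh() *Config {
	conf := NewConfig()
	conf.Metadata.RefreshFrequency = 0 // backgroundMetadataUpdater returns immediately
	return conf
}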
|
||||
|
||||
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
|
||||
retry := func(err error) error {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
if len(topics) > 0 {
|
||||
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
|
||||
} else {
|
||||
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
|
||||
}
|
||||
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
// valid response, use it
|
||||
if shouldRetry, err := client.updateMetadata(response); shouldRetry {
|
||||
Logger.Println("client/metadata found some partitions to be leaderless")
|
||||
return retry(err) // note: err can be nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
case PacketEncodingError:
|
||||
// didn't even send, return the error
|
||||
return err
|
||||
default:
|
||||
// some other error, remove that broker and try again
|
||||
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/metadata no available broker to send metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
|
||||
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
|
||||
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
// For all the brokers we received:
|
||||
// - if it is a new ID, save it
|
||||
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
|
||||
// - otherwise ignore it, replacing our existing one would just bounce the connection
|
||||
for _, broker := range data.Brokers {
|
||||
client.registerBroker(broker)
|
||||
}
|
||||
|
||||
for _, topic := range data.Topics {
|
||||
delete(client.metadata, topic.Name)
|
||||
delete(client.cachedPartitionsResults, topic.Name)
|
||||
|
||||
switch topic.Err {
|
||||
case ErrNoError:
|
||||
break
|
||||
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
|
||||
err = topic.Err
|
||||
continue
|
||||
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
|
||||
err = topic.Err
|
||||
retry = true
|
||||
continue
|
||||
case ErrLeaderNotAvailable: // retry, but store partial partition results
|
||||
retry = true
|
||||
break
|
||||
default: // don't retry, don't store partial results
|
||||
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
|
||||
err = topic.Err
|
||||
continue
|
||||
}
|
||||
|
||||
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
|
||||
for _, partition := range topic.Partitions {
|
||||
client.metadata[topic.Name][partition.ID] = partition
|
||||
if partition.Err == ErrLeaderNotAvailable {
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
|
||||
var partitionCache [maxPartitionIndex][]int32
|
||||
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
|
||||
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
|
||||
client.cachedPartitionsResults[topic.Name] = partitionCache
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
|
||||
return client.brokers[coordinatorID]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
|
||||
retry := func(err error) (*ConsumerMetadataResponse, error) {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
|
||||
|
||||
request := new(ConsumerMetadataRequest)
|
||||
request.ConsumerGroup = consumerGroup
|
||||
|
||||
response, err := broker.GetConsumerMetadata(request)
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
|
||||
|
||||
switch err.(type) {
|
||||
case PacketEncodingError:
|
||||
return nil, err
|
||||
default:
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
switch response.Err {
|
||||
case ErrNoError:
|
||||
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
|
||||
return response, nil
|
||||
|
||||
case ErrConsumerCoordinatorNotAvailable:
|
||||
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
|
||||
|
||||
// This is very ugly, but this scenario will only happen once per cluster.
|
||||
// The __consumer_offsets topic only has to be created one time.
|
||||
// The number of partitions is not configurable, but partition 0 should always exist.
|
||||
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
|
||||
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
return retry(ErrConsumerCoordinatorNotAvailable)
|
||||
default:
|
||||
return nil, response.Err
|
||||
}
|
||||
}
|
||||
|
||||
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
608
vendor/src/github.com/Shopify/sarama/client_test.go
vendored
Normal file
@@ -0,0 +1,608 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func safeClose(t testing.TB, c io.Closer) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleClient(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
seedBroker.Returns(new(MetadataResponse))
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestCachedPartitions(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
replicas := []int32{3, 1, 5}
|
||||
isr := []int32{5, 1}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker("localhost:12345", 2)
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
client := c.(*client)
|
||||
|
||||
// Verify they aren't cached the same
|
||||
allP := client.cachedPartitionsResults["my_topic"][allPartitions]
|
||||
writeP := client.cachedPartitionsResults["my_topic"][writablePartitions]
|
||||
if len(allP) == len(writeP) {
|
||||
t.Fatal("Invalid lengths!")
|
||||
}
|
||||
|
||||
tmp := client.cachedPartitionsResults["my_topic"]
|
||||
// Verify we actually use the cache at all!
|
||||
tmp[allPartitions] = []int32{1, 2, 3, 4}
|
||||
client.cachedPartitionsResults["my_topic"] = tmp
|
||||
if 4 != len(client.cachedPartitions("my_topic", allPartitions)) {
|
||||
t.Fatal("Not using the cache!")
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
replicas := []int32{seedBroker.BrokerID()}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataResponse = new(MetadataResponse)
|
||||
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
partitions, err := client.Partitions("unknown")
|
||||
|
||||
if err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
if partitions != nil {
|
||||
t.Errorf("Should return nil as partition list, found %v", partitions)
|
||||
}
|
||||
|
||||
// Should still use the cache of a known topic
|
||||
partitions, err = client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error, found %v", err)
|
||||
}
|
||||
|
||||
metadataResponse = new(MetadataResponse)
|
||||
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
// Should not use cache for unknown topic
|
||||
partitions, err = client.Partitions("unknown")
|
||||
if err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
if partitions != nil {
|
||||
t.Errorf("Should return nil as partition list, found %v", partitions)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientSeedBrokers(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker("localhost:12345", 2)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
replicas := []int32{3, 1, 5}
|
||||
isr := []int32{5, 1}
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
topics, err := client.Topics()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(topics) != 1 || topics[0] != "my_topic" {
|
||||
t.Error("Client returned incorrect topics:", topics)
|
||||
}
|
||||
|
||||
parts, err := client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 {
|
||||
t.Error("Client returned incorrect partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
parts, err = client.WritablePartitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 1 || parts[0] != 0 {
|
||||
t.Error("Client returned incorrect writable partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
tst, err := client.Leader("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if tst.ID() != 5 {
|
||||
t.Error("Leader for my_topic had incorrect ID.")
|
||||
}
|
||||
|
||||
replicas, err = client.Replicas("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if replicas[0] != 1 {
|
||||
t.Error("Incorrect (or unsorted) replica")
|
||||
} else if replicas[1] != 3 {
|
||||
t.Error("Incorrect (or unsorted) replica")
|
||||
} else if replicas[2] != 5 {
|
||||
t.Error("Incorrect (or unsorted) replica")
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientGetOffset(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadata := new(MetadataResponse)
|
||||
metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
metadata.AddBroker(leaderAddr, leader.BrokerID())
|
||||
seedBroker.Returns(metadata)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offsetResponse := new(OffsetResponse)
|
||||
offsetResponse.AddTopicPartition("foo", 0, 123)
|
||||
leader.Returns(offsetResponse)
|
||||
|
||||
offset, err := client.GetOffset("foo", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if offset != 123 {
|
||||
t.Error("Unexpected offset, got ", offset)
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Returns(metadata)
|
||||
|
||||
leader = NewMockBrokerAddr(t, 2, leaderAddr)
|
||||
offsetResponse = new(OffsetResponse)
|
||||
offsetResponse.AddTopicPartition("foo", 0, 456)
|
||||
leader.Returns(offsetResponse)
|
||||
|
||||
offset, err = client.GetOffset("foo", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if offset != 456 {
|
||||
t.Error("Unexpected offset, got ", offset)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientReceivingUnknownTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataUnknownTopic := new(MetadataResponse)
|
||||
metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
|
||||
if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("ErrUnknownTopicOrPartition expected, got", err)
|
||||
}
|
||||
|
||||
// If we are asking for the leader of a partition of the non-existing topic,
// we will request metadata again.
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
seedBroker.Returns(metadataUnknownTopic)
|
||||
|
||||
if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestClientReceivingPartialMetadata(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}
|
||||
|
||||
metadataPartial := new(MetadataResponse)
|
||||
metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
|
||||
metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError)
|
||||
metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable)
|
||||
seedBroker.Returns(metadataPartial)
|
||||
|
||||
if err := client.RefreshMetadata("new_topic"); err != nil {
|
||||
t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error")
|
||||
}
|
||||
|
||||
// Even though the metadata was incomplete, we should be able to get the leader of a partition
|
||||
// for which we did get a useful response, without doing additional requests.
|
||||
|
||||
partition0Leader, err := client.Leader("new_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if partition0Leader.Addr() != leader.Addr() {
|
||||
t.Error("Unexpected leader returned", partition0Leader.Addr())
|
||||
}
|
||||
|
||||
// If we are asking for the leader of a partition that didn't have a leader before,
|
||||
// we will do another metadata request.
|
||||
|
||||
seedBroker.Returns(metadataPartial)
|
||||
|
||||
// Still no leader for the partition, so asking for it should return an error.
|
||||
_, err = client.Leader("new_topic", 1)
|
||||
if err != ErrLeaderNotAvailable {
|
||||
t.Error("Expected ErrLeaderNotAvailable, got", err)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestClientRefreshBehaviour(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
leader := NewMockBroker(t, 5)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
metadataResponse2 := new(MetadataResponse)
|
||||
metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse2)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parts, err := client.Partitions("my_topic")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if len(parts) != 1 || parts[0] != 0xb {
|
||||
t.Error("Client returned incorrect partitions for my_topic:", parts)
|
||||
}
|
||||
|
||||
tst, err := client.Leader("my_topic", 0xb)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else if tst.ID() != 5 {
|
||||
t.Error("Leader for my_topic had incorrect ID.")
|
||||
}
|
||||
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientResurrectDeadSeeds(t *testing.T) {
|
||||
initialSeed := NewMockBroker(t, 0)
|
||||
emptyMetadata := new(MetadataResponse)
|
||||
initialSeed.Returns(emptyMetadata)
|
||||
|
||||
conf := NewConfig()
|
||||
conf.Metadata.Retry.Backoff = 0
|
||||
conf.Metadata.RefreshFrequency = 0
|
||||
c, err := NewClient([]string{initialSeed.Addr()}, conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
initialSeed.Close()
|
||||
|
||||
client := c.(*client)
|
||||
|
||||
seed1 := NewMockBroker(t, 1)
|
||||
seed2 := NewMockBroker(t, 2)
|
||||
seed3 := NewMockBroker(t, 3)
|
||||
addr1 := seed1.Addr()
|
||||
addr2 := seed2.Addr()
|
||||
addr3 := seed3.Addr()
|
||||
|
||||
// Overwrite the seed brokers with a fixed ordering to make this test deterministic.
|
||||
safeClose(t, client.seedBrokers[0])
|
||||
client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)}
|
||||
client.deadSeeds = []*Broker{}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := client.RefreshMetadata(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
seed1.Close()
|
||||
seed2.Close()
|
||||
|
||||
seed1 = NewMockBrokerAddr(t, 1, addr1)
|
||||
seed2 = NewMockBrokerAddr(t, 2, addr2)
|
||||
|
||||
seed3.Close()
|
||||
|
||||
seed1.Close()
|
||||
seed2.Returns(emptyMetadata)
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(client.seedBrokers) != 2 {
|
||||
t.Error("incorrect number of live seeds")
|
||||
}
|
||||
if len(client.deadSeeds) != 1 {
|
||||
t.Error("incorrect number of dead seeds")
|
||||
}
|
||||
|
||||
safeClose(t, c)
|
||||
}
|
||||
|
||||
func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
staleCoordinator := NewMockBroker(t, 2)
|
||||
freshCoordinator := NewMockBroker(t, 3)
|
||||
|
||||
replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()}
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID())
|
||||
metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID())
|
||||
metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
coordinatorResponse1 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
|
||||
seedBroker.Returns(coordinatorResponse1)
|
||||
|
||||
coordinatorResponse2 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID()
|
||||
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse2.CoordinatorPort = staleCoordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse2)
|
||||
|
||||
broker, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if staleCoordinator.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
if staleCoordinator.BrokerID() != broker.ID() {
|
||||
t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID())
|
||||
}
|
||||
|
||||
// Grab the cached value
|
||||
broker2, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if broker2.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
coordinatorResponse3 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
|
||||
coordinatorResponse3.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse3)
|
||||
|
||||
// Refresh the locally cached value because it's stale
|
||||
if err := client.RefreshCoordinator("my_group"); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Grab the fresh value
|
||||
broker3, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if broker3.Addr() != freshCoordinator.Addr() {
|
||||
t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
|
||||
}
|
||||
|
||||
freshCoordinator.Close()
|
||||
staleCoordinator.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
coordinator := NewMockBroker(t, 2)
|
||||
|
||||
metadataResponse1 := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 0
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
coordinatorResponse1 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
|
||||
seedBroker.Returns(coordinatorResponse1)
|
||||
|
||||
metadataResponse2 := new(MetadataResponse)
|
||||
metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
|
||||
seedBroker.Returns(metadataResponse2)
|
||||
|
||||
replicas := []int32{coordinator.BrokerID()}
|
||||
metadataResponse3 := new(MetadataResponse)
|
||||
metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse3)
|
||||
|
||||
coordinatorResponse2 := new(ConsumerMetadataResponse)
|
||||
coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
|
||||
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
|
||||
coordinatorResponse2.CoordinatorPort = coordinator.Port()
|
||||
|
||||
seedBroker.Returns(coordinatorResponse2)
|
||||
|
||||
broker, err := client.Coordinator("my_group")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if coordinator.Addr() != broker.Addr() {
|
||||
t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
|
||||
}
|
||||
|
||||
if coordinator.BrokerID() != broker.ID() {
|
||||
t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
|
||||
}
|
||||
|
||||
coordinator.Close()
|
||||
seedBroker.Close()
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestClientAutorefreshShutdownRace(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
conf := NewConfig()
|
||||
conf.Metadata.RefreshFrequency = 100 * time.Millisecond
|
||||
client, err := NewClient([]string{seedBroker.Addr()}, conf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Wait for the background refresh to kick in
|
||||
time.Sleep(110 * time.Millisecond)
|
||||
|
||||
done := make(chan none)
|
||||
go func() {
|
||||
// Close the client
|
||||
if err := client.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// Wait for the Close to kick in
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Then return some metadata to the still-running background thread
|
||||
leader := NewMockBroker(t, 2)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
<-done
|
||||
|
||||
seedBroker.Close()
|
||||
|
||||
// give the update time to happen so we get a panic if it's still running (which it shouldn't)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
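For orientation (not part of the vendored files): a minimal sketch of how application code might use the client's coordinator lookup that the test above exercises. The broker address and group name are illustrative assumptions.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Assumed seed broker address; replace with your own brokers.
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil) // nil uses the default config
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Look up (and cache) the coordinator broker for the consumer group.
	coordinator, err := client.Coordinator("my_group")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator for my_group: %s (ID %d)", coordinator.Addr(), coordinator.ID())

	// If the cached coordinator turns out to be stale, refresh it before looking it up again.
	if err := client.RefreshCoordinator("my_group"); err != nil {
		log.Fatal(err)
	}
}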
417
vendor/src/github.com/Shopify/sarama/config.go
vendored
Normal file
@ -0,0 +1,417 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
const defaultClientID = "sarama"
|
||||
|
||||
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
|
||||
|
||||
// Config is used to pass multiple configuration options to Sarama's constructors.
|
||||
type Config struct {
|
||||
// Net is the namespace for network-level properties used by the Broker, and
|
||||
// shared by the Client/Producer/Consumer.
|
||||
Net struct {
|
||||
// How many outstanding requests a connection is allowed to have before
|
||||
// sending on it blocks (default 5).
|
||||
MaxOpenRequests int
|
||||
|
||||
// All three of the below configurations are similar to the
|
||||
// `socket.timeout.ms` setting in JVM kafka. All of them default
|
||||
// to 30 seconds.
|
||||
DialTimeout time.Duration // How long to wait for the initial connection.
|
||||
ReadTimeout time.Duration // How long to wait for a response.
|
||||
WriteTimeout time.Duration // How long to wait for a transmit.
|
||||
|
||||
TLS struct {
|
||||
// Whether or not to use TLS when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// The TLS configuration to use for secure connections if
|
||||
// enabled (defaults to nil).
|
||||
Config *tls.Config
|
||||
}
|
||||
|
||||
// SASL based authentication with broker. While there are multiple SASL authentication methods
|
||||
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
|
||||
SASL struct {
|
||||
// Whether or not to use SASL authentication when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// Whether or not to send the Kafka SASL handshake first if enabled
|
||||
// (defaults to true). You should only set this to false if you're using
|
||||
// a non-Kafka SASL proxy.
|
||||
Handshake bool
|
||||
// username and password for SASL/PLAIN authentication
|
||||
User string
|
||||
Password string
|
||||
}
|
||||
|
||||
// KeepAlive specifies the keep-alive period for an active network connection.
|
||||
// If zero, keep-alives are disabled. (default is 0: disabled).
|
||||
KeepAlive time.Duration
|
||||
}
|
||||
|
||||
// Metadata is the namespace for metadata management properties used by the
|
||||
// Client, and shared by the Producer/Consumer.
|
||||
Metadata struct {
|
||||
Retry struct {
|
||||
// The total number of times to retry a metadata request when the
|
||||
// cluster is in the middle of a leader election (default 3).
|
||||
Max int
|
||||
// How long to wait for leader election to occur before retrying
|
||||
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
|
||||
Backoff time.Duration
|
||||
}
|
||||
// How frequently to refresh the cluster metadata in the background.
|
||||
// Defaults to 10 minutes. Set to 0 to disable. Similar to
|
||||
// `topic.metadata.refresh.interval.ms` in the JVM version.
|
||||
RefreshFrequency time.Duration
|
||||
}
|
||||
|
||||
// Producer is the namespace for configuration related to producing messages,
|
||||
// used by the Producer.
|
||||
Producer struct {
|
||||
// The maximum permitted size of a message (defaults to 1000000). Should be
|
||||
// set equal to or smaller than the broker's `message.max.bytes`.
|
||||
MaxMessageBytes int
|
||||
// The level of acknowledgement reliability needed from the broker (defaults
|
||||
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
|
||||
// JVM producer.
|
||||
RequiredAcks RequiredAcks
|
||||
// The maximum duration the broker will wait for the receipt of the number of
|
||||
// RequiredAcks (defaults to 10 seconds). This is only relevant when
|
||||
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
|
||||
// millisecond resolution, nanoseconds will be truncated. Equivalent to
|
||||
// the JVM producer's `request.timeout.ms` setting.
|
||||
Timeout time.Duration
|
||||
// The type of compression to use on messages (defaults to no compression).
|
||||
// Similar to `compression.codec` setting of the JVM producer.
|
||||
Compression CompressionCodec
|
||||
// Generates partitioners for choosing the partition to send messages to
|
||||
// (defaults to hashing the message key). Similar to the `partitioner.class`
|
||||
// setting for the JVM producer.
|
||||
Partitioner PartitionerConstructor
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
// you must read from the respective channels to prevent deadlock.
|
||||
Return struct {
|
||||
// If enabled, successfully delivered messages will be returned on the
|
||||
// Successes channel (default disabled).
|
||||
Successes bool
|
||||
|
||||
// If enabled, messages that failed to deliver will be returned on the
|
||||
// Errors channel, including the associated error (default enabled).
|
||||
Errors bool
|
||||
}
|
||||
|
||||
// The following config options control how often messages are batched up and
|
||||
// sent to the broker. By default, messages are sent as fast as possible, and
|
||||
// all messages received while the current batch is in-flight are placed
|
||||
// into the subsequent batch.
|
||||
Flush struct {
|
||||
// The best-effort number of bytes needed to trigger a flush. Use the
|
||||
// global sarama.MaxRequestSize to set a hard upper limit.
|
||||
Bytes int
|
||||
// The best-effort number of messages needed to trigger a flush. Use
|
||||
// `MaxMessages` to set a hard upper limit.
|
||||
Messages int
|
||||
// The best-effort frequency of flushes. Equivalent to
|
||||
// `queue.buffering.max.ms` setting of JVM producer.
|
||||
Frequency time.Duration
|
||||
// The maximum number of messages the producer will send in a single
|
||||
// broker request. Defaults to 0 for unlimited. Similar to
|
||||
// `queue.buffering.max.messages` in the JVM producer.
|
||||
MaxMessages int
|
||||
}
|
||||
|
||||
Retry struct {
|
||||
// The total number of times to retry sending a message (default 3).
|
||||
// Similar to the `message.send.max.retries` setting of the JVM producer.
|
||||
Max int
|
||||
// How long to wait for the cluster to settle between retries
|
||||
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
|
||||
// JVM producer.
|
||||
Backoff time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
// Consumer is the namespace for configuration related to consuming messages,
|
||||
// used by the Consumer.
|
||||
//
|
||||
// Note that Sarama's Consumer type does not currently support automatic
|
||||
// consumer-group rebalancing and offset tracking. For Zookeeper-based
|
||||
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
|
||||
// library builds on Sarama to add this support. For Kafka-based tracking
|
||||
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
|
||||
// builds on Sarama to add this support.
|
||||
Consumer struct {
|
||||
Retry struct {
|
||||
// How long to wait after a failing to read from a partition before
|
||||
// trying again (default 2s).
|
||||
Backoff time.Duration
|
||||
}
|
||||
|
||||
// Fetch is the namespace for controlling how many bytes are retrieved by any
|
||||
// given request.
|
||||
Fetch struct {
|
||||
// The minimum number of message bytes to fetch in a request - the broker
|
||||
// will wait until at least this many are available. The default is 1,
|
||||
// as 0 causes the consumer to spin when no messages are available.
|
||||
// Equivalent to the JVM's `fetch.min.bytes`.
|
||||
Min int32
|
||||
// The default number of message bytes to fetch from the broker in each
|
||||
// request (default 32768). This should be larger than the majority of
|
||||
// your messages, or else the consumer will spend a lot of time
|
||||
// negotiating sizes and not actually consuming. Similar to the JVM's
|
||||
// `fetch.message.max.bytes`.
|
||||
Default int32
|
||||
// The maximum number of message bytes to fetch from the broker in a
|
||||
// single request. Messages larger than this will return
|
||||
// ErrMessageTooLarge and will not be consumable, so you must be sure
|
||||
// this is at least as large as your largest message. Defaults to 0
|
||||
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
|
||||
// global `sarama.MaxResponseSize` still applies.
|
||||
Max int32
|
||||
}
|
||||
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
|
||||
// bytes to become available before it returns fewer than that anyway. The
|
||||
// default is 250ms, since 0 causes the consumer to spin when no events are
|
||||
// available. 100-500ms is a reasonable range for most cases. Kafka only
|
||||
// supports precision up to milliseconds; nanoseconds will be truncated.
|
||||
// Equivalent to the JVM's `fetch.wait.max.ms`.
|
||||
MaxWaitTime time.Duration
|
||||
|
||||
// The maximum amount of time the consumer expects a message takes to process
|
||||
// for the user. If writing to the Messages channel takes longer than this,
|
||||
// that partition will stop fetching more messages until it can proceed again.
|
||||
// Note that, since the Messages channel is buffered, the actual grace time is
|
||||
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
|
||||
MaxProcessingTime time.Duration
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
// you must read from them to prevent deadlock.
|
||||
Return struct {
|
||||
// If enabled, any errors that occurred while consuming are returned on
|
||||
// the Errors channel (default disabled).
|
||||
Errors bool
|
||||
}
|
||||
|
||||
// Offsets specifies configuration for how and when to commit consumed
|
||||
// offsets. This currently requires the manual use of an OffsetManager
|
||||
// but will eventually be automated.
|
||||
Offsets struct {
|
||||
// How frequently to commit updated offsets. Defaults to 1s.
|
||||
CommitInterval time.Duration
|
||||
|
||||
// The initial offset to use if no offset was previously committed.
|
||||
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
|
||||
Initial int64
|
||||
|
||||
// The retention duration for committed offsets. If zero, disabled
|
||||
// (in which case the `offsets.retention.minutes` option on the
|
||||
// broker will be used). Kafka only supports precision up to
|
||||
// milliseconds; nanoseconds will be truncated. Requires Kafka
|
||||
// broker version 0.9.0 or later.
|
||||
// (default is 0: disabled).
|
||||
Retention time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
// A user-provided string sent with every request to the brokers for logging,
|
||||
// debugging, and auditing purposes. Defaults to "sarama", but you should
|
||||
// probably set it to something specific to your application.
|
||||
ClientID string
|
||||
// The number of events to buffer in internal and external channels. This
|
||||
// permits the producer and consumer to continue processing some messages
|
||||
// in the background while user code is working, greatly improving throughput.
|
||||
// Defaults to 256.
|
||||
ChannelBufferSize int
|
||||
// The version of Kafka that Sarama will assume it is running against.
|
||||
// Defaults to the oldest supported stable version. Since Kafka provides
|
||||
// backwards-compatibility, setting it to a version older than you have
|
||||
// will not break anything, although it may prevent you from using the
|
||||
// latest features. Setting it to a version greater than you are actually
|
||||
// running may lead to random breakage.
|
||||
Version KafkaVersion
|
||||
// The registry to define metrics into.
|
||||
// Defaults to a local registry.
|
||||
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
|
||||
// prior to starting Sarama.
|
||||
// See Examples on how to use the metrics registry
|
||||
MetricRegistry metrics.Registry
|
||||
}
|
||||
|
||||
// NewConfig returns a new configuration instance with sane defaults.
|
||||
func NewConfig() *Config {
|
||||
c := &Config{}
|
||||
|
||||
c.Net.MaxOpenRequests = 5
|
||||
c.Net.DialTimeout = 30 * time.Second
|
||||
c.Net.ReadTimeout = 30 * time.Second
|
||||
c.Net.WriteTimeout = 30 * time.Second
|
||||
c.Net.SASL.Handshake = true
|
||||
|
||||
c.Metadata.Retry.Max = 3
|
||||
c.Metadata.Retry.Backoff = 250 * time.Millisecond
|
||||
c.Metadata.RefreshFrequency = 10 * time.Minute
|
||||
|
||||
c.Producer.MaxMessageBytes = 1000000
|
||||
c.Producer.RequiredAcks = WaitForLocal
|
||||
c.Producer.Timeout = 10 * time.Second
|
||||
c.Producer.Partitioner = NewHashPartitioner
|
||||
c.Producer.Retry.Max = 3
|
||||
c.Producer.Retry.Backoff = 100 * time.Millisecond
|
||||
c.Producer.Return.Errors = true
|
||||
|
||||
c.Consumer.Fetch.Min = 1
|
||||
c.Consumer.Fetch.Default = 32768
|
||||
c.Consumer.Retry.Backoff = 2 * time.Second
|
||||
c.Consumer.MaxWaitTime = 250 * time.Millisecond
|
||||
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
|
||||
c.Consumer.Return.Errors = false
|
||||
c.Consumer.Offsets.CommitInterval = 1 * time.Second
|
||||
c.Consumer.Offsets.Initial = OffsetNewest
|
||||
|
||||
c.ClientID = defaultClientID
|
||||
c.ChannelBufferSize = 256
|
||||
c.Version = minVersion
|
||||
c.MetricRegistry = metrics.NewRegistry()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Validate checks a Config instance. It will return a
|
||||
// ConfigurationError if the specified values don't make sense.
|
||||
func (c *Config) Validate() error {
|
||||
// some configuration values should be warned on but not fail completely; do those first
|
||||
if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
|
||||
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
|
||||
}
|
||||
if c.Net.SASL.Enable == false {
|
||||
if c.Net.SASL.User != "" {
|
||||
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
|
||||
}
|
||||
if c.Net.SASL.Password != "" {
|
||||
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
|
||||
}
|
||||
}
|
||||
if c.Producer.RequiredAcks > 1 {
|
||||
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
|
||||
}
|
||||
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
|
||||
Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
|
||||
}
|
||||
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
|
||||
Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
|
||||
}
|
||||
if c.Producer.Timeout%time.Millisecond != 0 {
|
||||
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
|
||||
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
|
||||
}
|
||||
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
|
||||
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
|
||||
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
|
||||
}
|
||||
if c.ClientID == defaultClientID {
|
||||
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
|
||||
}
|
||||
|
||||
// validate Net values
|
||||
switch {
|
||||
case c.Net.MaxOpenRequests <= 0:
|
||||
return ConfigurationError("Net.MaxOpenRequests must be > 0")
|
||||
case c.Net.DialTimeout <= 0:
|
||||
return ConfigurationError("Net.DialTimeout must be > 0")
|
||||
case c.Net.ReadTimeout <= 0:
|
||||
return ConfigurationError("Net.ReadTimeout must be > 0")
|
||||
case c.Net.WriteTimeout <= 0:
|
||||
return ConfigurationError("Net.WriteTimeout must be > 0")
|
||||
case c.Net.KeepAlive < 0:
|
||||
return ConfigurationError("Net.KeepAlive must be >= 0")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
|
||||
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
|
||||
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
|
||||
}
|
||||
|
||||
// validate the Metadata values
|
||||
switch {
|
||||
case c.Metadata.Retry.Max < 0:
|
||||
return ConfigurationError("Metadata.Retry.Max must be >= 0")
|
||||
case c.Metadata.Retry.Backoff < 0:
|
||||
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
|
||||
case c.Metadata.RefreshFrequency < 0:
|
||||
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
|
||||
}
|
||||
|
||||
// validate the Producer values
|
||||
switch {
|
||||
case c.Producer.MaxMessageBytes <= 0:
|
||||
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
|
||||
case c.Producer.RequiredAcks < -1:
|
||||
return ConfigurationError("Producer.RequiredAcks must be >= -1")
|
||||
case c.Producer.Timeout <= 0:
|
||||
return ConfigurationError("Producer.Timeout must be > 0")
|
||||
case c.Producer.Partitioner == nil:
|
||||
return ConfigurationError("Producer.Partitioner must not be nil")
|
||||
case c.Producer.Flush.Bytes < 0:
|
||||
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
|
||||
case c.Producer.Flush.Messages < 0:
|
||||
return ConfigurationError("Producer.Flush.Messages must be >= 0")
|
||||
case c.Producer.Flush.Frequency < 0:
|
||||
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
|
||||
case c.Producer.Flush.MaxMessages < 0:
|
||||
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
|
||||
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
|
||||
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
|
||||
case c.Producer.Retry.Max < 0:
|
||||
return ConfigurationError("Producer.Retry.Max must be >= 0")
|
||||
case c.Producer.Retry.Backoff < 0:
|
||||
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
|
||||
}
|
||||
|
||||
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
|
||||
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
|
||||
}
|
||||
|
||||
// validate the Consumer values
|
||||
switch {
|
||||
case c.Consumer.Fetch.Min <= 0:
|
||||
return ConfigurationError("Consumer.Fetch.Min must be > 0")
|
||||
case c.Consumer.Fetch.Default <= 0:
|
||||
return ConfigurationError("Consumer.Fetch.Default must be > 0")
|
||||
case c.Consumer.Fetch.Max < 0:
|
||||
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
|
||||
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
|
||||
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
|
||||
case c.Consumer.MaxProcessingTime <= 0:
|
||||
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
|
||||
case c.Consumer.Retry.Backoff < 0:
|
||||
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
|
||||
case c.Consumer.Offsets.CommitInterval <= 0:
|
||||
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
|
||||
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
|
||||
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
|
||||
|
||||
}
|
||||
|
||||
// validate misc shared values
|
||||
switch {
|
||||
case c.ChannelBufferSize < 0:
|
||||
return ConfigurationError("ChannelBufferSize must be >= 0")
|
||||
case !validID.MatchString(c.ClientID):
|
||||
return ConfigurationError("ClientID is invalid")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
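As a usage note (not part of the vendored file): a minimal sketch of constructing and validating a Config along the lines documented above. The ClientID, flush/refresh intervals, and the lz4/version pairing are illustrative assumptions; Validate enforces the lz4 requirement shown in the code above.

package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()

	// Values below are illustrative assumptions, not recommendations.
	config.ClientID = "my-app" // something application-specific instead of the default "sarama"
	config.Producer.Compression = sarama.CompressionLZ4
	config.Version = sarama.V0_10_0_0 // lz4 compression requires Kafka >= 0.10, per Validate
	config.Producer.Flush.Frequency = 100 * time.Millisecond
	config.Metadata.RefreshFrequency = 5 * time.Minute

	// Validate catches inconsistent settings before any network I/O happens.
	if err := config.Validate(); err != nil {
		log.Fatalf("invalid sarama config: %v", err)
	}
}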
70
vendor/src/github.com/Shopify/sarama/config_test.go
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
func TestDefaultConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
if err := config.Validate(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if config.MetricRegistry == nil {
|
||||
t.Error("Expected non nil metrics.MetricRegistry, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidClientIDConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.ClientID = "foo:bar"
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
|
||||
t.Error("Expected invalid ClientID, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyClientIDConfigValidates(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.ClientID = ""
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
|
||||
t.Error("Expected invalid ClientID, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLZ4ConfigValidation(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionLZ4
|
||||
if err := config.Validate(); string(err.(ConfigurationError)) != "lz4 compression requires Version >= V0_10_0_0" {
|
||||
t.Error("Expected invalid lz4/kakfa version error, got ", err)
|
||||
}
|
||||
config.Version = V0_10_0_0
|
||||
if err := config.Validate(); err != nil {
|
||||
t.Error("Expected lz4 to work, got ", err)
|
||||
}
|
||||
}
|
||||
|
||||
// This example shows how to integrate with an existing registry as well as publishing metrics
|
||||
// on the standard output
|
||||
func ExampleConfig_metrics() {
|
||||
// Our application registry
|
||||
appMetricRegistry := metrics.NewRegistry()
|
||||
appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry)
|
||||
appGauge.Update(1)
|
||||
|
||||
config := NewConfig()
|
||||
// Use a prefix registry instead of the default local one
|
||||
config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.")
|
||||
|
||||
// Simulate a metric created by sarama without starting a broker
|
||||
saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry)
|
||||
saramaGauge.Update(2)
|
||||
|
||||
metrics.WriteOnce(appMetricRegistry, os.Stdout)
|
||||
// Output:
|
||||
// gauge m1
|
||||
// value: 1
|
||||
// gauge sarama.m2
|
||||
// value: 2
|
||||
}
|
735
vendor/src/github.com/Shopify/sarama/consumer.go
vendored
Normal file
@ -0,0 +1,735 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
|
||||
type ConsumerMessage struct {
|
||||
Key, Value []byte
|
||||
Topic string
|
||||
Partition int32
|
||||
Offset int64
|
||||
Timestamp time.Time // only set if kafka is version 0.10+
|
||||
}
|
||||
|
||||
// ConsumerError is what is provided to the user when an error occurs.
|
||||
// It wraps an error and includes the topic and partition.
|
||||
type ConsumerError struct {
|
||||
Topic string
|
||||
Partition int32
|
||||
Err error
|
||||
}
|
||||
|
||||
func (ce ConsumerError) Error() string {
|
||||
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
|
||||
}
|
||||
|
||||
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
|
||||
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
|
||||
// when stopping.
|
||||
type ConsumerErrors []*ConsumerError
|
||||
|
||||
func (ce ConsumerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
|
||||
}
|
||||
|
||||
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
|
||||
// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
|
||||
// scope.
|
||||
//
|
||||
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
|
||||
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
|
||||
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
|
||||
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
|
||||
type Consumer interface {
|
||||
|
||||
// Topics returns the set of available topics as retrieved from the cluster
|
||||
// metadata. This method is the same as Client.Topics(), and is provided for
|
||||
// convenience.
|
||||
Topics() ([]string, error)
|
||||
|
||||
// Partitions returns the sorted list of all partition IDs for the given topic.
|
||||
// This method is the same as Client.Partitions(), and is provided for convenience.
|
||||
Partitions(topic string) ([]int32, error)
|
||||
|
||||
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
|
||||
// the given offset. It will return an error if this Consumer is already consuming
|
||||
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
|
||||
// or OffsetOldest
|
||||
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
|
||||
|
||||
// HighWaterMarks returns the current high water marks for each topic and partition.
|
||||
// Consistency between partitions is not guaranteed since high water marks are updated separately.
|
||||
HighWaterMarks() map[string]map[int32]int64
|
||||
|
||||
// Close shuts down the consumer. It must be called after all child
|
||||
// PartitionConsumers have already been closed.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type consumer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
lock sync.Mutex
|
||||
children map[string]map[int32]*partitionConsumer
|
||||
brokerConsumers map[*Broker]*brokerConsumer
|
||||
}
|
||||
|
||||
// NewConsumer creates a new consumer using the given broker addresses and configuration.
|
||||
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
|
||||
client, err := NewClient(addrs, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := NewConsumerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.(*consumer).ownClient = true
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewConsumerFromClient creates a new consumer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this consumer.
|
||||
func NewConsumerFromClient(client Client) (Consumer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
c := &consumer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
children: make(map[string]map[int32]*partitionConsumer),
|
||||
brokerConsumers: make(map[*Broker]*brokerConsumer),
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *consumer) Close() error {
|
||||
if c.ownClient {
|
||||
return c.client.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *consumer) Topics() ([]string, error) {
|
||||
return c.client.Topics()
|
||||
}
|
||||
|
||||
func (c *consumer) Partitions(topic string) ([]int32, error) {
|
||||
return c.client.Partitions(topic)
|
||||
}
|
||||
|
||||
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
|
||||
child := &partitionConsumer{
|
||||
consumer: c,
|
||||
conf: c.conf,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
|
||||
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
|
||||
feeder: make(chan *FetchResponse, 1),
|
||||
trigger: make(chan none, 1),
|
||||
dying: make(chan none),
|
||||
fetchSize: c.conf.Consumer.Fetch.Default,
|
||||
}
|
||||
|
||||
if err := child.chooseStartingOffset(offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var leader *Broker
|
||||
var err error
|
||||
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.addChild(child); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go withRecover(child.dispatcher)
|
||||
go withRecover(child.responseFeeder)
|
||||
|
||||
child.broker = c.refBrokerConsumer(leader)
|
||||
child.broker.input <- child
|
||||
|
||||
return child, nil
|
||||
}
|
||||
|
||||
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
hwms := make(map[string]map[int32]int64)
|
||||
for topic, p := range c.children {
|
||||
hwm := make(map[int32]int64, len(p))
|
||||
for partition, pc := range p {
|
||||
hwm[partition] = pc.HighWaterMarkOffset()
|
||||
}
|
||||
hwms[topic] = hwm
|
||||
}
|
||||
|
||||
return hwms
|
||||
}
|
||||
|
||||
func (c *consumer) addChild(child *partitionConsumer) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
topicChildren := c.children[child.topic]
|
||||
if topicChildren == nil {
|
||||
topicChildren = make(map[int32]*partitionConsumer)
|
||||
c.children[child.topic] = topicChildren
|
||||
}
|
||||
|
||||
if topicChildren[child.partition] != nil {
|
||||
return ConfigurationError("That topic/partition is already being consumed")
|
||||
}
|
||||
|
||||
topicChildren[child.partition] = child
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *consumer) removeChild(child *partitionConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.children[child.topic], child.partition)
|
||||
}
|
||||
|
||||
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
bc := c.brokerConsumers[broker]
|
||||
if bc == nil {
|
||||
bc = c.newBrokerConsumer(broker)
|
||||
c.brokerConsumers[broker] = bc
|
||||
}
|
||||
|
||||
bc.refs++
|
||||
|
||||
return bc
|
||||
}
|
||||
|
||||
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
brokerWorker.refs--
|
||||
|
||||
if brokerWorker.refs == 0 {
|
||||
close(brokerWorker.input)
|
||||
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
|
||||
delete(c.brokerConsumers, brokerWorker.broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.brokerConsumers, brokerWorker.broker)
|
||||
}
|
||||
|
||||
// PartitionConsumer
|
||||
|
||||
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
|
||||
// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
|
||||
// when it passes out of scope.
|
||||
//
|
||||
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
|
||||
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
|
||||
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
|
||||
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
|
||||
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
|
||||
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
|
||||
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
|
||||
type PartitionConsumer interface {
|
||||
|
||||
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will
|
||||
// return immediately, after which you should wait until the 'messages' and
|
||||
// 'errors' channels are drained. It is required to call this function, or
|
||||
// Close before a consumer object passes out of scope, as it will otherwise
|
||||
// leak memory. You must call this before calling Close on the underlying client.
|
||||
AsyncClose()
|
||||
|
||||
// Close stops the PartitionConsumer from fetching messages. It is required to
|
||||
// call this function (or AsyncClose) before a consumer object passes out of
|
||||
// scope, as it will otherwise leak memory. You must call this before calling
|
||||
// Close on the underlying client.
|
||||
Close() error
|
||||
|
||||
// Messages returns the read channel for the messages that are returned by
|
||||
// the broker.
|
||||
Messages() <-chan *ConsumerMessage
|
||||
|
||||
// Errors returns a read channel of errors that occurred during consuming, if
|
||||
// enabled. By default, errors are logged and not returned over this channel.
|
||||
// If you want to implement any custom error handling, set your config's
|
||||
// Consumer.Return.Errors setting to true, and read from this channel.
|
||||
Errors() <-chan *ConsumerError
|
||||
|
||||
// HighWaterMarkOffset returns the high water mark offset of the partition,
|
||||
// i.e. the offset that will be used for the next message that will be produced.
|
||||
// You can use this to determine how far behind the processing is.
|
||||
HighWaterMarkOffset() int64
|
||||
}
|
||||
|
||||
type partitionConsumer struct {
|
||||
consumer *consumer
|
||||
conf *Config
|
||||
topic string
|
||||
partition int32
|
||||
|
||||
broker *brokerConsumer
|
||||
messages chan *ConsumerMessage
|
||||
errors chan *ConsumerError
|
||||
feeder chan *FetchResponse
|
||||
|
||||
trigger, dying chan none
|
||||
responseResult error
|
||||
|
||||
fetchSize int32
|
||||
offset int64
|
||||
highWaterMarkOffset int64
|
||||
}
|
||||
|
||||
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
|
||||
|
||||
func (child *partitionConsumer) sendError(err error) {
|
||||
cErr := &ConsumerError{
|
||||
Topic: child.topic,
|
||||
Partition: child.partition,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if child.conf.Consumer.Return.Errors {
|
||||
child.errors <- cErr
|
||||
} else {
|
||||
Logger.Println(cErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) dispatcher() {
|
||||
for _ = range child.trigger {
|
||||
select {
|
||||
case <-child.dying:
|
||||
close(child.trigger)
|
||||
case <-time.After(child.conf.Consumer.Retry.Backoff):
|
||||
if child.broker != nil {
|
||||
child.consumer.unrefBrokerConsumer(child.broker)
|
||||
child.broker = nil
|
||||
}
|
||||
|
||||
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
|
||||
if err := child.dispatch(); err != nil {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if child.broker != nil {
|
||||
child.consumer.unrefBrokerConsumer(child.broker)
|
||||
}
|
||||
child.consumer.removeChild(child)
|
||||
close(child.feeder)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) dispatch() error {
|
||||
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var leader *Broker
|
||||
var err error
|
||||
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
child.broker = child.consumer.refBrokerConsumer(leader)
|
||||
|
||||
child.broker.input <- child
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
|
||||
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case offset == OffsetNewest:
|
||||
child.offset = newestOffset
|
||||
case offset == OffsetOldest:
|
||||
child.offset = oldestOffset
|
||||
case offset >= oldestOffset && offset <= newestOffset:
|
||||
child.offset = offset
|
||||
default:
|
||||
return ErrOffsetOutOfRange
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
|
||||
return child.messages
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
|
||||
return child.errors
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) AsyncClose() {
|
||||
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
|
||||
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
|
||||
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
|
||||
// also just close itself)
|
||||
close(child.dying)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) Close() error {
|
||||
child.AsyncClose()
|
||||
|
||||
go withRecover(func() {
|
||||
for _ = range child.messages {
|
||||
// drain
|
||||
}
|
||||
})
|
||||
|
||||
var errors ConsumerErrors
|
||||
for err := range child.errors {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
|
||||
return atomic.LoadInt64(&child.highWaterMarkOffset)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) responseFeeder() {
|
||||
var msgs []*ConsumerMessage
|
||||
expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
|
||||
expireTimedOut := false
|
||||
|
||||
feederLoop:
|
||||
for response := range child.feeder {
|
||||
msgs, child.responseResult = child.parseResponse(response)
|
||||
|
||||
for i, msg := range msgs {
|
||||
if !expiryTimer.Stop() && !expireTimedOut {
|
||||
// expiryTimer was expired; clear out the waiting msg
|
||||
<-expiryTimer.C
|
||||
}
|
||||
expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
|
||||
expireTimedOut = false
|
||||
|
||||
select {
|
||||
case child.messages <- msg:
|
||||
case <-expiryTimer.C:
|
||||
expireTimedOut = true
|
||||
child.responseResult = errTimedOut
|
||||
child.broker.acks.Done()
|
||||
for _, msg = range msgs[i:] {
|
||||
child.messages <- msg
|
||||
}
|
||||
child.broker.input <- child
|
||||
continue feederLoop
|
||||
}
|
||||
}
|
||||
|
||||
child.broker.acks.Done()
|
||||
}
|
||||
|
||||
close(child.messages)
|
||||
close(child.errors)
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
|
||||
block := response.GetBlock(child.topic, child.partition)
|
||||
if block == nil {
|
||||
return nil, ErrIncompleteResponse
|
||||
}
|
||||
|
||||
if block.Err != ErrNoError {
|
||||
return nil, block.Err
|
||||
}
|
||||
|
||||
if len(block.MsgSet.Messages) == 0 {
|
||||
// We got no messages. If we got a trailing one then we need to ask for more data.
|
||||
// Otherwise we just poll again and wait for one to be produced...
|
||||
if block.MsgSet.PartialTrailingMessage {
|
||||
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
|
||||
// we can't ask for more data, we've hit the configured limit
|
||||
child.sendError(ErrMessageTooLarge)
|
||||
child.offset++ // skip this one so we can keep processing future messages
|
||||
} else {
|
||||
child.fetchSize *= 2
|
||||
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
|
||||
child.fetchSize = child.conf.Consumer.Fetch.Max
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// we got messages, reset our fetch size in case it was increased for a previous request
|
||||
child.fetchSize = child.conf.Consumer.Fetch.Default
|
||||
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
|
||||
|
||||
incomplete := false
|
||||
prelude := true
|
||||
var messages []*ConsumerMessage
|
||||
for _, msgBlock := range block.MsgSet.Messages {
|
||||
|
||||
for _, msg := range msgBlock.Messages() {
|
||||
offset := msg.Offset
|
||||
if msg.Msg.Version >= 1 {
|
||||
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
|
||||
offset += baseOffset
|
||||
}
|
||||
if prelude && offset < child.offset {
|
||||
continue
|
||||
}
|
||||
prelude = false
|
||||
|
||||
if offset >= child.offset {
|
||||
messages = append(messages, &ConsumerMessage{
|
||||
Topic: child.topic,
|
||||
Partition: child.partition,
|
||||
Key: msg.Msg.Key,
|
||||
Value: msg.Msg.Value,
|
||||
Offset: offset,
|
||||
Timestamp: msg.Msg.Timestamp,
|
||||
})
|
||||
child.offset = offset + 1
|
||||
} else {
|
||||
incomplete = true
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if incomplete || len(messages) == 0 {
|
||||
return nil, ErrIncompleteResponse
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// brokerConsumer
|
||||
|
||||
type brokerConsumer struct {
|
||||
consumer *consumer
|
||||
broker *Broker
|
||||
input chan *partitionConsumer
|
||||
newSubscriptions chan []*partitionConsumer
|
||||
wait chan none
|
||||
subscriptions map[*partitionConsumer]none
|
||||
acks sync.WaitGroup
|
||||
refs int
|
||||
}
|
||||
|
||||
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
|
||||
bc := &brokerConsumer{
|
||||
consumer: c,
|
||||
broker: broker,
|
||||
input: make(chan *partitionConsumer),
|
||||
newSubscriptions: make(chan []*partitionConsumer),
|
||||
wait: make(chan none),
|
||||
subscriptions: make(map[*partitionConsumer]none),
|
||||
refs: 0,
|
||||
}
|
||||
|
||||
go withRecover(bc.subscriptionManager)
|
||||
go withRecover(bc.subscriptionConsumer)
|
||||
|
||||
return bc
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) subscriptionManager() {
|
||||
var buffer []*partitionConsumer
|
||||
|
||||
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
|
||||
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
|
||||
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
|
||||
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
|
||||
// so the main goroutine can block waiting for work if it has none.
|
||||
for {
|
||||
if len(buffer) > 0 {
|
||||
select {
|
||||
case event, ok := <-bc.input:
|
||||
if !ok {
|
||||
goto done
|
||||
}
|
||||
buffer = append(buffer, event)
|
||||
case bc.newSubscriptions <- buffer:
|
||||
buffer = nil
|
||||
case bc.wait <- none{}:
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case event, ok := <-bc.input:
|
||||
if !ok {
|
||||
goto done
|
||||
}
|
||||
buffer = append(buffer, event)
|
||||
case bc.newSubscriptions <- nil:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
close(bc.wait)
|
||||
if len(buffer) > 0 {
|
||||
bc.newSubscriptions <- buffer
|
||||
}
|
||||
close(bc.newSubscriptions)
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) subscriptionConsumer() {
|
||||
<-bc.wait // wait for our first piece of work
|
||||
|
||||
// the subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
|
||||
for newSubscriptions := range bc.newSubscriptions {
|
||||
bc.updateSubscriptions(newSubscriptions)
|
||||
|
||||
if len(bc.subscriptions) == 0 {
|
||||
// We're about to be shut down or we're about to receive more subscriptions.
|
||||
// Either way, the signal just hasn't propagated to our goroutine yet.
|
||||
<-bc.wait
|
||||
continue
|
||||
}
|
||||
|
||||
response, err := bc.fetchNewMessages()
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
|
||||
bc.abort(err)
|
||||
return
|
||||
}
|
||||
|
||||
bc.acks.Add(len(bc.subscriptions))
|
||||
for child := range bc.subscriptions {
|
||||
child.feeder <- response
|
||||
}
|
||||
bc.acks.Wait()
|
||||
bc.handleResponses()
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
|
||||
for _, child := range newSubscriptions {
|
||||
bc.subscriptions[child] = none{}
|
||||
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
|
||||
}
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
select {
|
||||
case <-child.dying:
|
||||
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
|
||||
close(child.trigger)
|
||||
delete(bc.subscriptions, child)
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) handleResponses() {
|
||||
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
|
||||
for child := range bc.subscriptions {
|
||||
result := child.responseResult
|
||||
child.responseResult = nil
|
||||
|
||||
switch result {
|
||||
case nil:
|
||||
break
|
||||
case errTimedOut:
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
|
||||
bc.broker.ID(), child.topic, child.partition)
|
||||
delete(bc.subscriptions, child)
|
||||
case ErrOffsetOutOfRange:
|
||||
// there's no point in retrying this; it will just fail the same way again
|
||||
// shut it down and force the user to choose what to do
|
||||
child.sendError(result)
|
||||
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
|
||||
close(child.trigger)
|
||||
delete(bc.subscriptions, child)
|
||||
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
|
||||
// not an error, but does need redispatching
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
|
||||
bc.broker.ID(), child.topic, child.partition, result)
|
||||
child.trigger <- none{}
|
||||
delete(bc.subscriptions, child)
|
||||
default:
|
||||
// dunno, tell the user and try redispatching
|
||||
child.sendError(result)
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
|
||||
bc.broker.ID(), child.topic, child.partition, result)
|
||||
child.trigger <- none{}
|
||||
delete(bc.subscriptions, child)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) abort(err error) {
|
||||
bc.consumer.abandonBrokerConsumer(bc)
|
||||
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
|
||||
for newSubscriptions := range bc.newSubscriptions {
|
||||
if len(newSubscriptions) == 0 {
|
||||
<-bc.wait
|
||||
continue
|
||||
}
|
||||
for _, child := range newSubscriptions {
|
||||
child.sendError(err)
|
||||
child.trigger <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
|
||||
request := &FetchRequest{
|
||||
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
|
||||
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
request.Version = 2
|
||||
}
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
|
||||
}
|
||||
|
||||
return bc.broker.Fetch(request)
|
||||
}
|
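For orientation (not part of the vendored file): a minimal sketch of the Consumer/PartitionConsumer API defined above, consuming a single partition from the newest offset. The broker address, topic, and partition are illustrative assumptions.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Assumed broker address and topic; adjust for your cluster.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Println("error closing consumer:", err)
		}
	}()

	// Consume partition 0 starting at the newest available offset.
	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.AsyncClose()

	// This loop runs until the PartitionConsumer is closed; the consumer retries
	// transient errors internally and only stops itself on ErrOffsetOutOfRange.
	for msg := range pc.Messages() {
		log.Printf("%s/%d @ %d: %s", msg.Topic, msg.Partition, msg.Offset, msg.Value)
	}
}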
94
vendor/src/github.com/Shopify/sarama/consumer_group_members.go
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
package sarama
|
||||
|
||||
type ConsumerGroupMemberMetadata struct {
|
||||
Version int16
|
||||
Topics []string
|
||||
UserData []byte
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
|
||||
pe.putInt16(m.Version)
|
||||
|
||||
if err := pe.putStringArray(m.Topics); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putBytes(m.UserData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
|
||||
if m.Version, err = pd.getInt16(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m.Topics, err = pd.getStringArray(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m.UserData, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConsumerGroupMemberAssignment struct {
|
||||
Version int16
|
||||
Topics map[string][]int32
|
||||
UserData []byte
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
|
||||
pe.putInt16(m.Version)
|
||||
|
||||
if err := pe.putArrayLength(len(m.Topics)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range m.Topics {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putInt32Array(partitions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := pe.putBytes(m.UserData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
|
||||
if m.Version, err = pd.getInt16(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var topicLen int
|
||||
if topicLen, err = pd.getArrayLength(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.Topics = make(map[string][]int32, topicLen)
|
||||
for i := 0; i < topicLen; i++ {
|
||||
var topic string
|
||||
if topic, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if m.UserData, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
73
vendor/src/github.com/Shopify/sarama/consumer_group_members_test.go
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
groupMemberMetadata = []byte{
|
||||
0, 1, // Version
|
||||
0, 0, 0, 2, // Topic array length
|
||||
0, 3, 'o', 'n', 'e', // Topic one
|
||||
0, 3, 't', 'w', 'o', // Topic two
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
|
||||
}
|
||||
groupMemberAssignment = []byte{
|
||||
0, 1, // Version
|
||||
0, 0, 0, 1, // Topic array length
|
||||
0, 3, 'o', 'n', 'e', // Topic one
|
||||
0, 0, 0, 3, // Topic one, partition array length
|
||||
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
|
||||
}
|
||||
)
|
||||
|
||||
func TestConsumerGroupMemberMetadata(t *testing.T) {
|
||||
meta := &ConsumerGroupMemberMetadata{
|
||||
Version: 1,
|
||||
Topics: []string{"one", "two"},
|
||||
UserData: []byte{0x01, 0x02, 0x03},
|
||||
}
|
||||
|
||||
buf, err := encode(meta, nil)
|
||||
if err != nil {
|
||||
t.Error("Failed to encode data", err)
|
||||
} else if !bytes.Equal(groupMemberMetadata, buf) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf)
|
||||
}
|
||||
|
||||
meta2 := new(ConsumerGroupMemberMetadata)
|
||||
err = decode(buf, meta2)
|
||||
if err != nil {
|
||||
t.Error("Failed to decode data", err)
|
||||
} else if !reflect.DeepEqual(meta, meta2) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerGroupMemberAssignment(t *testing.T) {
|
||||
amt := &ConsumerGroupMemberAssignment{
|
||||
Version: 1,
|
||||
Topics: map[string][]int32{
|
||||
"one": []int32{0, 2, 4},
|
||||
},
|
||||
UserData: []byte{0x01, 0x02, 0x03},
|
||||
}
|
||||
|
||||
buf, err := encode(amt, nil)
|
||||
if err != nil {
|
||||
t.Error("Failed to encode data", err)
|
||||
} else if !bytes.Equal(groupMemberAssignment, buf) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf)
|
||||
}
|
||||
|
||||
amt2 := new(ConsumerGroupMemberAssignment)
|
||||
err = decode(buf, amt2)
|
||||
if err != nil {
|
||||
t.Error("Failed to decode data", err)
|
||||
} else if !reflect.DeepEqual(amt, amt2) {
|
||||
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2)
|
||||
}
|
||||
}
|
26
vendor/src/github.com/Shopify/sarama/consumer_metadata_request.go
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package sarama
|
||||
|
||||
type ConsumerMetadataRequest struct {
|
||||
ConsumerGroup string
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
|
||||
return pe.putString(r.ConsumerGroup)
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.ConsumerGroup, err = pd.getString()
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) key() int16 {
|
||||
return 10
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
19
vendor/src/github.com/Shopify/sarama/consumer_metadata_request_test.go
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
consumerMetadataRequestEmpty = []byte{
|
||||
0x00, 0x00}
|
||||
|
||||
consumerMetadataRequestString = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
|
||||
)
|
||||
|
||||
func TestConsumerMetadataRequest(t *testing.T) {
|
||||
request := new(ConsumerMetadataRequest)
|
||||
testRequest(t, "empty string", request, consumerMetadataRequestEmpty)
|
||||
|
||||
request.ConsumerGroup = "foobar"
|
||||
testRequest(t, "with string", request, consumerMetadataRequestString)
|
||||
}
|
85
vendor/src/github.com/Shopify/sarama/consumer_metadata_response.go
vendored
Normal file
85
vendor/src/github.com/Shopify/sarama/consumer_metadata_response.go
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type ConsumerMetadataResponse struct {
|
||||
Err KError
|
||||
Coordinator *Broker
|
||||
CoordinatorID int32 // deprecated: use Coordinator.ID()
|
||||
CoordinatorHost string // deprecated: use Coordinator.Addr()
|
||||
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Err = KError(tmp)
|
||||
|
||||
coordinator := new(Broker)
|
||||
if err := coordinator.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
if coordinator.addr == ":0" {
|
||||
return nil
|
||||
}
|
||||
r.Coordinator = coordinator
|
||||
|
||||
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
|
||||
// backwards compatibility
|
||||
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.ParseInt(portstr, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.CoordinatorID = r.Coordinator.ID()
|
||||
r.CoordinatorHost = host
|
||||
r.CoordinatorPort = int32(port)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
if r.Coordinator != nil {
|
||||
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port, err := strconv.ParseInt(portstr, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.Coordinator.ID())
|
||||
if err := pe.putString(host); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(int32(port))
|
||||
return nil
|
||||
}
|
||||
pe.putInt32(r.CoordinatorID)
|
||||
if err := pe.putString(r.CoordinatorHost); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.CoordinatorPort)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) key() int16 {
|
||||
return 10
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
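The two files above define the request/response pair used to locate a consumer group's coordinator. The following is a minimal, illustrative sketch (not part of the vendored files) of how a caller could issue that request through sarama's exported Broker API; the broker address and group name are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Connect to any broker in the cluster; it can answer coordinator lookups.
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// GetConsumerMetadata sends a ConsumerMetadataRequest (API key 10, version 0).
	resp, err := broker.GetConsumerMetadata(&sarama.ConsumerMetadataRequest{ConsumerGroup: "example-group"})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatal(resp.Err)
	}
	fmt.Println("coordinator for example-group:", resp.Coordinator.Addr())
}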
35
vendor/src/github.com/Shopify/sarama/consumer_metadata_response_test.go
vendored
Normal file
35
vendor/src/github.com/Shopify/sarama/consumer_metadata_response_test.go
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
consumerMetadataResponseError = []byte{
|
||||
0x00, 0x0E,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
consumerMetadataResponseSuccess = []byte{
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xAB,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0xCC, 0xDD}
|
||||
)
|
||||
|
||||
func TestConsumerMetadataResponseError(t *testing.T) {
|
||||
response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
|
||||
testResponse(t, "error", &response, consumerMetadataResponseError)
|
||||
}
|
||||
|
||||
func TestConsumerMetadataResponseSuccess(t *testing.T) {
|
||||
broker := NewBroker("foo:52445")
|
||||
broker.id = 0xAB
|
||||
response := ConsumerMetadataResponse{
|
||||
Coordinator: broker,
|
||||
CoordinatorID: 0xAB,
|
||||
CoordinatorHost: "foo",
|
||||
CoordinatorPort: 0xCCDD,
|
||||
Err: ErrNoError,
|
||||
}
|
||||
testResponse(t, "success", &response, consumerMetadataResponseSuccess)
|
||||
}
|
854
vendor/src/github.com/Shopify/sarama/consumer_test.go
vendored
Normal file
854
vendor/src/github.com/Shopify/sarama/consumer_test.go
vendored
Normal file
@ -0,0 +1,854 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var testMsg = StringEncoder("Foo")
|
||||
|
||||
// If a particular offset is provided then messages are consumed starting from
|
||||
// that offset.
|
||||
func TestConsumerOffsetManual(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 10; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
|
||||
}
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 2345),
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
// When
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 1234)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages starting from offset 1234 are consumed.
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case message := <-consumer.Messages():
|
||||
assertMessageOffset(t, message, int64(i+1234))
|
||||
case err := <-consumer.Errors():
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If `OffsetNewest` is passed as the initial offset then the first consumed
|
||||
// message indeed corresponds to the offset that the broker claims to be the
|
||||
// newest in its metadata response.
|
||||
func TestConsumerOffsetNewest(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 10).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 7),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 9, testMsg).
|
||||
SetMessage("my_topic", 0, 10, testMsg).
|
||||
SetMessage("my_topic", 0, 11, testMsg).
|
||||
SetHighWaterMark("my_topic", 0, 14),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then
|
||||
assertMessageOffset(t, <-consumer.Messages(), 10)
|
||||
if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
|
||||
t.Errorf("Expected high water mark offset 14, found %d", hwmo)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// It is possible to close a partition consumer and then create it anew.
|
||||
func TestConsumerRecreate(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 10, testMsg),
|
||||
})
|
||||
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertMessageOffset(t, <-pc.Messages(), 10)
|
||||
|
||||
// When
|
||||
safeClose(t, pc)
|
||||
pc, err = c.ConsumePartition("my_topic", 0, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then
|
||||
assertMessageOffset(t, <-pc.Messages(), 10)
|
||||
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// An attempt to consume the same partition twice should fail.
|
||||
func TestConsumerDuplicate(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc1, err := c.ConsumePartition("my_topic", 0, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
pc2, err := c.ConsumePartition("my_topic", 0, 0)
|
||||
|
||||
// Then
|
||||
if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
|
||||
t.Fatal("A partition cannot be consumed twice at the same time")
|
||||
}
|
||||
|
||||
safeClose(t, pc1)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If the consumer fails to refresh metadata it keeps retrying with the frequency
|
||||
// specified by `Config.Consumer.Retry.Backoff`.
|
||||
func TestConsumerLeaderRefreshError(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
|
||||
// Stage 1: my_topic/0 served by broker0
|
||||
Logger.Printf(" STAGE 1")
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 123).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 123, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Net.ReadTimeout = 100 * time.Millisecond
|
||||
config.Consumer.Retry.Backoff = 200 * time.Millisecond
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 123)
|
||||
|
||||
// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
|
||||
// but the requests to retrieve metadata fail with network timeout.
|
||||
Logger.Printf(" STAGE 2")
|
||||
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse2),
|
||||
})
|
||||
|
||||
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
|
||||
t.Errorf("Unexpected error: %v", consErr.Err)
|
||||
}
|
||||
|
||||
// Stage 3: finally the metadata returned by broker0 says that broker1 is
|
||||
// the new leader for my_topic/0. Consumption resumes.
|
||||
|
||||
Logger.Printf(" STAGE 3")
|
||||
|
||||
broker1 := NewMockBroker(t, 101)
|
||||
|
||||
broker1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 124, testMsg),
|
||||
})
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetBroker(broker1.Addr(), broker1.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker1.BrokerID()),
|
||||
})
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 124)
|
||||
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker1.Close()
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerInvalidTopic(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()),
|
||||
})
|
||||
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
|
||||
// Then
|
||||
if pc != nil || err != ErrUnknownTopicOrPartition {
|
||||
t.Errorf("Should fail with, err=%v", err)
|
||||
}
|
||||
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// Nothing bad happens if a partition consumer that has no leader assigned at
|
||||
// the moment is closed.
|
||||
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 100)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 123).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 123, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Net.ReadTimeout = 100 * time.Millisecond
|
||||
config.Consumer.Retry.Backoff = 100 * time.Millisecond
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Metadata.Retry.Max = 0
|
||||
c, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertMessageOffset(t, <-pc.Messages(), 123)
|
||||
|
||||
// broker0 says that it is no longer the leader for my_topic/0, but the
|
||||
// requests to retrieve metadata fail with network timeout.
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse2),
|
||||
})
|
||||
|
||||
// When
|
||||
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
|
||||
t.Errorf("Unexpected error: %v", consErr.Err)
|
||||
}
|
||||
|
||||
// Then: the partition consumer can be closed without any problem.
|
||||
safeClose(t, pc)
|
||||
safeClose(t, c)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If the initial offset passed on partition consumer creation is out of the
|
||||
// actual offset range for the partition, then the partition consumer stops
|
||||
// immediately, closing its output channels.
|
||||
func TestConsumerShutsDownOutOfRange(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse := new(FetchResponse)
|
||||
fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 7),
|
||||
"FetchRequest": NewMockWrapper(fetchResponse),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 101)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: consumer should shut down closing its messages and errors channels.
|
||||
if _, ok := <-consumer.Messages(); ok {
|
||||
t.Error("Expected the consumer to shut down")
|
||||
}
|
||||
safeClose(t, consumer)
|
||||
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If a fetch response contains messages with offsets that are smaller than
|
||||
// requested, then such messages are ignored.
|
||||
func TestConsumerExtraOffsets(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4)
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNoError)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages with offsets 1 and 2 are not returned even though they
|
||||
// are present in the response.
|
||||
assertMessageOffset(t, <-consumer.Messages(), 3)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 4)
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// It is fine if offsets of fetched messages are not sequential (as long as
|
||||
// they are still strictly increasing).
|
||||
func TestConsumerNonSequentialOffsets(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
fetchResponse1 := &FetchResponse{}
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7)
|
||||
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11)
|
||||
fetchResponse2 := &FetchResponse{}
|
||||
fetchResponse2.AddError("my_topic", 0, ErrNoError)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0),
|
||||
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When
|
||||
consumer, err := master.ConsumePartition("my_topic", 0, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then: messages with offsets 5, 7 and 11 are returned even though they
|
||||
// are not sequential.
|
||||
assertMessageOffset(t, <-consumer.Messages(), 5)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 7)
|
||||
assertMessageOffset(t, <-consumer.Messages(), 11)
|
||||
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
// If leadership for a partition changes then the consumer resolves the new
|
||||
// leader and switches to it.
|
||||
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
|
||||
// initial setup
|
||||
seedBroker := NewMockBroker(t, 10)
|
||||
leader0 := NewMockBroker(t, 0)
|
||||
leader1 := NewMockBroker(t, 1)
|
||||
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(leader0.Addr(), leader0.BrokerID()).
|
||||
SetBroker(leader1.Addr(), leader1.BrokerID()).
|
||||
SetLeader("my_topic", 0, leader0.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader1.BrokerID()),
|
||||
})
|
||||
|
||||
mockOffsetResponse1 := NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1000).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 0).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 1000)
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse1,
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse1,
|
||||
"FetchRequest": NewMockFetchResponse(t, 1),
|
||||
})
|
||||
|
||||
// launch test goroutines
|
||||
config := NewConfig()
|
||||
config.Consumer.Retry.Backoff = 50
|
||||
master, err := NewConsumer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// we expect to end up (eventually) consuming exactly ten messages on each partition
|
||||
var wg sync.WaitGroup
|
||||
for i := int32(0); i < 2; i++ {
|
||||
consumer, err := master.ConsumePartition("my_topic", i, 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
go func(c PartitionConsumer) {
|
||||
for err := range c.Errors() {
|
||||
t.Error(err)
|
||||
}
|
||||
}(consumer)
|
||||
|
||||
wg.Add(1)
|
||||
go func(partition int32, c PartitionConsumer) {
|
||||
for i := 0; i < 10; i++ {
|
||||
message := <-c.Messages()
|
||||
if message.Offset != int64(i) {
|
||||
t.Error("Incorrect message offset!", i, partition, message.Offset)
|
||||
}
|
||||
if message.Partition != partition {
|
||||
t.Error("Incorrect message partition!")
|
||||
}
|
||||
}
|
||||
safeClose(t, consumer)
|
||||
wg.Done()
|
||||
}(i, consumer)
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 1")
|
||||
// Stage 1:
|
||||
// * my_topic/0 -> leader0 serves 4 messages
|
||||
// * my_topic/1 -> leader1 serves 0 messages
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 4; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
|
||||
}
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 2")
|
||||
// Stage 2:
|
||||
// * leader0 says that it is no longer serving my_topic/0
|
||||
// * seedBroker tells that leader1 is serving my_topic/0 now
|
||||
|
||||
// seed broker tells that the new partition 0 leader is leader1
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetLeader("my_topic", 0, leader1.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader1.BrokerID()),
|
||||
})
|
||||
|
||||
// leader0 says no longer leader of partition 0
|
||||
fetchResponse := new(FetchResponse)
|
||||
fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockWrapper(fetchResponse),
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 3")
|
||||
// Stage 3:
|
||||
// * my_topic/0 -> leader1 serves 3 messages
|
||||
// * my_topic/1 -> leader1 serves 8 messages
|
||||
|
||||
// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
|
||||
mockFetchResponse2 := NewMockFetchResponse(t, 2)
|
||||
for i := 4; i < 7; i++ {
|
||||
mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
|
||||
}
|
||||
for i := 0; i < 8; i++ {
|
||||
mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
|
||||
}
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse2,
|
||||
})
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
Logger.Printf(" STAGE 4")
|
||||
// Stage 4:
|
||||
// * my_topic/0 -> leader1 serves 3 messages
|
||||
// * my_topic/1 -> leader1 tells that it is no longer the leader
|
||||
// * seedBroker tells that leader0 is a new leader for my_topic/1
|
||||
|
||||
// metadata assigns partition 0 to leader1 and partition 1 to leader0
|
||||
seedBroker.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetLeader("my_topic", 0, leader1.BrokerID()).
|
||||
SetLeader("my_topic", 1, leader0.BrokerID()),
|
||||
})
|
||||
|
||||
// leader1 provides three more messages on partition0, says no longer leader of partition1
|
||||
mockFetchResponse3 := NewMockFetchResponse(t, 3).
|
||||
SetMessage("my_topic", 0, int64(7), testMsg).
|
||||
SetMessage("my_topic", 0, int64(8), testMsg).
|
||||
SetMessage("my_topic", 0, int64(9), testMsg)
|
||||
fetchResponse4 := new(FetchResponse)
|
||||
fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
|
||||
leader1.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
|
||||
})
|
||||
|
||||
// leader0 provides two messages on partition 1
|
||||
mockFetchResponse4 := NewMockFetchResponse(t, 2)
|
||||
for i := 8; i < 10; i++ {
|
||||
mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
|
||||
}
|
||||
leader0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse4,
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
safeClose(t, master)
|
||||
leader1.Close()
|
||||
leader0.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
// When two partitions have the same broker as the leader, if one partition
|
||||
// consumer's channel buffer is full then that does not affect the other
|
||||
// consumer's ability to read messages.
|
||||
func TestConsumerInterleavedClose(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()).
|
||||
SetLeader("my_topic", 1, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1000).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1100).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 2000).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 2100),
|
||||
"FetchRequest": NewMockFetchResponse(t, 1).
|
||||
SetMessage("my_topic", 0, 1000, testMsg).
|
||||
SetMessage("my_topic", 0, 1001, testMsg).
|
||||
SetMessage("my_topic", 0, 1002, testMsg).
|
||||
SetMessage("my_topic", 1, 2000, testMsg),
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 0
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c0, err := master.ConsumePartition("my_topic", 0, 1000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c1, err := master.ConsumePartition("my_topic", 1, 2000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When/Then: we can read from partition 0 even if nobody reads from partition 1
|
||||
assertMessageOffset(t, <-c0.Messages(), 1000)
|
||||
assertMessageOffset(t, <-c0.Messages(), 1001)
|
||||
assertMessageOffset(t, <-c0.Messages(), 1002)
|
||||
|
||||
safeClose(t, c1)
|
||||
safeClose(t, c0)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
|
||||
broker0 := NewMockBroker(t, 0)
|
||||
broker0Addr := broker0.Addr()
|
||||
broker1 := NewMockBroker(t, 1)
|
||||
|
||||
mockMetadataResponse := NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetBroker(broker1.Addr(), broker1.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()).
|
||||
SetLeader("my_topic", 1, broker1.BrokerID())
|
||||
|
||||
mockOffsetResponse := NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 1000).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1100).
|
||||
SetOffset("my_topic", 1, OffsetOldest, 2000).
|
||||
SetOffset("my_topic", 1, OffsetNewest, 2100)
|
||||
|
||||
mockFetchResponse := NewMockFetchResponse(t, 1)
|
||||
for i := 0; i < 10; i++ {
|
||||
mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
|
||||
mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
|
||||
}
|
||||
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"OffsetRequest": mockOffsetResponse,
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
broker1.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": mockMetadataResponse,
|
||||
"OffsetRequest": mockOffsetResponse,
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
config := NewConfig()
|
||||
config.Consumer.Return.Errors = true
|
||||
config.Consumer.Retry.Backoff = 100 * time.Millisecond
|
||||
config.ChannelBufferSize = 1
|
||||
master, err := NewConsumer([]string{broker1.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c0, err := master.ConsumePartition("my_topic", 0, 1000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c1, err := master.ConsumePartition("my_topic", 1, 2000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// read messages from both partitions to make sure that both brokers operate
|
||||
// normally.
|
||||
assertMessageOffset(t, <-c0.Messages(), 1000)
|
||||
assertMessageOffset(t, <-c1.Messages(), 2000)
|
||||
|
||||
// Simulate broker shutdown. Note that metadata response does not change,
|
||||
// that is, the leadership does not move to another broker. So the partition
|
||||
// consumer will keep retrying to restore the connection with the broker.
|
||||
broker0.Close()
|
||||
|
||||
// Make sure that while the partition/0 leader is down, consumer/partition/1
|
||||
// is capable of pulling messages from broker1.
|
||||
for i := 1; i < 7; i++ {
|
||||
offset := (<-c1.Messages()).Offset
|
||||
if offset != int64(2000+i) {
|
||||
t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i))
|
||||
}
|
||||
}
|
||||
|
||||
// Bring broker0 back to service.
|
||||
broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"FetchRequest": mockFetchResponse,
|
||||
})
|
||||
|
||||
// Read the rest of messages from both partitions.
|
||||
for i := 7; i < 10; i++ {
|
||||
assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
|
||||
}
|
||||
for i := 1; i < 10; i++ {
|
||||
assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c0.Errors():
|
||||
default:
|
||||
t.Errorf("Partition consumer should have detected broker restart")
|
||||
}
|
||||
|
||||
safeClose(t, c1)
|
||||
safeClose(t, c0)
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
broker1.Close()
|
||||
}
|
||||
|
||||
func TestConsumerOffsetOutOfRange(t *testing.T) {
|
||||
// Given
|
||||
broker0 := NewMockBroker(t, 2)
|
||||
broker0.SetHandlerByMap(map[string]MockResponse{
|
||||
"MetadataRequest": NewMockMetadataResponse(t).
|
||||
SetBroker(broker0.Addr(), broker0.BrokerID()).
|
||||
SetLeader("my_topic", 0, broker0.BrokerID()),
|
||||
"OffsetRequest": NewMockOffsetResponse(t).
|
||||
SetOffset("my_topic", 0, OffsetNewest, 1234).
|
||||
SetOffset("my_topic", 0, OffsetOldest, 2345),
|
||||
})
|
||||
|
||||
master, err := NewConsumer([]string{broker0.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// When/Then
|
||||
if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
|
||||
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
safeClose(t, master)
|
||||
broker0.Close()
|
||||
}
|
||||
|
||||
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
|
||||
if msg.Offset != expectedOffset {
|
||||
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
// This example shows how to use the consumer to read messages
|
||||
// from a single partition.
|
||||
func ExampleConsumer() {
|
||||
consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := consumer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := partitionConsumer.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Trap SIGINT to trigger a shutdown.
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt)
|
||||
|
||||
consumed := 0
|
||||
ConsumerLoop:
|
||||
for {
|
||||
select {
|
||||
case msg := <-partitionConsumer.Messages():
|
||||
log.Printf("Consumed message offset %d\n", msg.Offset)
|
||||
consumed++
|
||||
case <-signals:
|
||||
break ConsumerLoop
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Consumed: %d\n", consumed)
|
||||
}
|
36
vendor/src/github.com/Shopify/sarama/crc32_field.go
vendored
Normal file
36
vendor/src/github.com/Shopify/sarama/crc32_field.go
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/klauspost/crc32"
|
||||
)
|
||||
|
||||
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
|
||||
type crc32Field struct {
|
||||
startOffset int
|
||||
}
|
||||
|
||||
func (c *crc32Field) saveOffset(in int) {
|
||||
c.startOffset = in
|
||||
}
|
||||
|
||||
func (c *crc32Field) reserveLength() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
func (c *crc32Field) run(curOffset int, buf []byte) error {
|
||||
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
|
||||
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *crc32Field) check(curOffset int, buf []byte) error {
|
||||
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
|
||||
|
||||
if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
|
||||
return PacketDecodingError{"CRC didn't match"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
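crc32Field reserves four bytes at the start of a region and later fills them with the big-endian CRC32 (IEEE) of everything that follows, which the decoder re-checks. Below is a standalone, illustrative sketch of that framing (not part of the vendored files), using the standard library's hash/crc32 in place of the vendored klauspost/crc32 (both expose ChecksumIEEE); the payload is hypothetical.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("example message body")

	// Reserve 4 bytes for the checksum field, then append the payload,
	// mirroring reserveLength() and the bytes written after saveOffset().
	buf := make([]byte, 4, 4+len(payload))
	buf = append(buf, payload...)

	// Like run(): checksum everything after the reserved field and write it in place.
	binary.BigEndian.PutUint32(buf[:4], crc32.ChecksumIEEE(buf[4:]))

	// Like check(): recompute and compare against the stored value.
	fmt.Println("crc matches:", crc32.ChecksumIEEE(buf[4:]) == binary.BigEndian.Uint32(buf[:4]))
}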
30
vendor/src/github.com/Shopify/sarama/describe_groups_request.go
vendored
Normal file
30
vendor/src/github.com/Shopify/sarama/describe_groups_request.go
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package sarama
|
||||
|
||||
type DescribeGroupsRequest struct {
|
||||
Groups []string
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
|
||||
return pe.putStringArray(r.Groups)
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Groups, err = pd.getStringArray()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) key() int16 {
|
||||
return 15
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) AddGroup(group string) {
|
||||
r.Groups = append(r.Groups, group)
|
||||
}
|
34
vendor/src/github.com/Shopify/sarama/describe_groups_request_test.go
vendored
Normal file
34
vendor/src/github.com/Shopify/sarama/describe_groups_request_test.go
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyDescribeGroupsRequest = []byte{0, 0, 0, 0}
|
||||
|
||||
singleDescribeGroupsRequest = []byte{
|
||||
0, 0, 0, 1, // 1 group
|
||||
0, 3, 'f', 'o', 'o', // group name: foo
|
||||
}
|
||||
|
||||
doubleDescribeGroupsRequest = []byte{
|
||||
0, 0, 0, 2, // 2 groups
|
||||
0, 3, 'f', 'o', 'o', // group name: foo
|
||||
0, 3, 'b', 'a', 'r', // group name: bar
|
||||
}
|
||||
)
|
||||
|
||||
func TestDescribeGroupsRequest(t *testing.T) {
|
||||
var request *DescribeGroupsRequest
|
||||
|
||||
request = new(DescribeGroupsRequest)
|
||||
testRequest(t, "no groups", request, emptyDescribeGroupsRequest)
|
||||
|
||||
request = new(DescribeGroupsRequest)
|
||||
request.AddGroup("foo")
|
||||
testRequest(t, "one group", request, singleDescribeGroupsRequest)
|
||||
|
||||
request = new(DescribeGroupsRequest)
|
||||
request.AddGroup("foo")
|
||||
request.AddGroup("bar")
|
||||
testRequest(t, "two groups", request, doubleDescribeGroupsRequest)
|
||||
}
|
186
vendor/src/github.com/Shopify/sarama/describe_groups_response.go
vendored
Normal file
186
vendor/src/github.com/Shopify/sarama/describe_groups_response.go
vendored
Normal file
@ -0,0 +1,186 @@
|
||||
package sarama
|
||||
|
||||
type DescribeGroupsResponse struct {
|
||||
Groups []*GroupDescription
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Groups)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, groupDescription := range r.Groups {
|
||||
if err := groupDescription.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Groups = make([]*GroupDescription, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Groups[i] = new(GroupDescription)
|
||||
if err := r.Groups[i].decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) key() int16 {
|
||||
return 15
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
type GroupDescription struct {
|
||||
Err KError
|
||||
GroupId string
|
||||
State string
|
||||
ProtocolType string
|
||||
Protocol string
|
||||
Members map[string]*GroupMemberDescription
|
||||
}
|
||||
|
||||
func (gd *GroupDescription) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(gd.Err))
|
||||
|
||||
if err := pe.putString(gd.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.State); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.ProtocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gd.Protocol); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(gd.Members)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for memberId, groupMemberDescription := range gd.Members {
|
||||
if err := pe.putString(memberId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := groupMemberDescription.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
|
||||
if kerr, err := pd.getInt16(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
gd.Err = KError(kerr)
|
||||
}
|
||||
|
||||
if gd.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.State, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.ProtocolType, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gd.Protocol, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
gd.Members = make(map[string]*GroupMemberDescription)
|
||||
for i := 0; i < n; i++ {
|
||||
memberId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gd.Members[memberId] = new(GroupMemberDescription)
|
||||
if err := gd.Members[memberId].decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type GroupMemberDescription struct {
|
||||
ClientId string
|
||||
ClientHost string
|
||||
MemberMetadata []byte
|
||||
MemberAssignment []byte
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(gmd.ClientId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(gmd.ClientHost); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
|
||||
if gmd.ClientId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.ClientHost, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
|
||||
assignment := new(ConsumerGroupMemberAssignment)
|
||||
err := decode(gmd.MemberAssignment, assignment)
|
||||
return assignment, err
|
||||
}
|
||||
|
||||
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
|
||||
metadata := new(ConsumerGroupMemberMetadata)
|
||||
err := decode(gmd.MemberMetadata, metadata)
|
||||
return metadata, err
|
||||
}
|
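A minimal, illustrative sketch (not part of the vendored files) of how the DescribeGroups request/response pair and the GetMemberAssignment helper above might be used against a group coordinator; the coordinator address and group name are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// The DescribeGroups request must be sent to the group's coordinator broker.
	coordinator := sarama.NewBroker("localhost:9092")
	if err := coordinator.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer coordinator.Close()

	req := &sarama.DescribeGroupsRequest{}
	req.AddGroup("example-group")

	resp, err := coordinator.DescribeGroups(req)
	if err != nil {
		log.Fatal(err)
	}

	for _, group := range resp.Groups {
		fmt.Println(group.GroupId, group.State, group.ProtocolType)
		for memberID, member := range group.Members {
			// Decode the opaque assignment bytes into topics/partitions.
			assignment, err := member.GetMemberAssignment()
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(" ", memberID, "->", assignment.Topics)
		}
	}
}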
91
vendor/src/github.com/Shopify/sarama/describe_groups_response_test.go
vendored
Normal file
91
vendor/src/github.com/Shopify/sarama/describe_groups_response_test.go
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
describeGroupsResponseEmpty = []byte{
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
describeGroupsResponsePopulated = []byte{
|
||||
0, 0, 0, 2, // 2 groups
|
||||
|
||||
0, 0, // no error
|
||||
0, 3, 'f', 'o', 'o', // Group ID
|
||||
0, 3, 'b', 'a', 'r', // State
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol type: consumer
|
||||
0, 3, 'b', 'a', 'z', // Protocol name
|
||||
0, 0, 0, 1, // 1 member
|
||||
0, 2, 'i', 'd', // Member ID
|
||||
0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID
|
||||
0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata
|
||||
0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment
|
||||
|
||||
0, 30, // ErrGroupAuthorizationFailed
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0, 0, 0,
|
||||
}
|
||||
)
|
||||
|
||||
func TestDescribeGroupsResponse(t *testing.T) {
|
||||
var response *DescribeGroupsResponse
|
||||
|
||||
response = new(DescribeGroupsResponse)
|
||||
testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0)
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(DescribeGroupsResponse)
|
||||
testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0)
|
||||
if len(response.Groups) != 2 {
|
||||
t.Error("Expected two groups")
|
||||
}
|
||||
|
||||
group0 := response.Groups[0]
|
||||
if group0.Err != ErrNoError {
|
||||
t.Error("Unxpected groups[0].Err, found", group0.Err)
|
||||
}
|
||||
if group0.GroupId != "foo" {
|
||||
t.Error("Unxpected groups[0].GroupId, found", group0.GroupId)
|
||||
}
|
||||
if group0.State != "bar" {
|
||||
t.Error("Unxpected groups[0].State, found", group0.State)
|
||||
}
|
||||
if group0.ProtocolType != "consumer" {
|
||||
t.Error("Unxpected groups[0].ProtocolType, found", group0.ProtocolType)
|
||||
}
|
||||
if group0.Protocol != "baz" {
|
||||
t.Error("Unxpected groups[0].Protocol, found", group0.Protocol)
|
||||
}
|
||||
if len(group0.Members) != 1 {
|
||||
t.Error("Unxpected groups[0].Members, found", group0.Members)
|
||||
}
|
||||
if group0.Members["id"].ClientId != "sarama" {
|
||||
t.Error("Unxpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId)
|
||||
}
|
||||
if group0.Members["id"].ClientHost != "localhost" {
|
||||
t.Error("Unxpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost)
|
||||
}
|
||||
if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) {
|
||||
t.Error("Unxpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata)
|
||||
}
|
||||
if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) {
|
||||
t.Error("Unxpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment)
|
||||
}
|
||||
|
||||
group1 := response.Groups[1]
|
||||
if group1.Err != ErrGroupAuthorizationFailed {
|
||||
t.Error("Unxpected groups[1].Err, found", group0.Err)
|
||||
}
|
||||
if len(group1.Members) != 0 {
|
||||
t.Error("Unxpected groups[1].Members, found", group0.Members)
|
||||
}
|
||||
}
|
13
vendor/src/github.com/Shopify/sarama/dev.yml
vendored
Normal file
13
vendor/src/github.com/Shopify/sarama/dev.yml
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
name: sarama
|
||||
|
||||
up:
|
||||
- go: 1.7.3
|
||||
|
||||
commands:
|
||||
test:
|
||||
run: make test
|
||||
desc: 'run unit tests'
|
||||
|
||||
packages:
|
||||
- git@github.com:Shopify/dev-shopify.git
|
||||
|
89
vendor/src/github.com/Shopify/sarama/encoder_decoder.go
vendored
Normal file
89
vendor/src/github.com/Shopify/sarama/encoder_decoder.go
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Encoder is the interface that wraps the basic Encode method.
|
||||
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
|
||||
type encoder interface {
|
||||
encode(pe packetEncoder) error
|
||||
}
|
||||
|
||||
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
|
||||
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
|
||||
if e == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var prepEnc prepEncoder
|
||||
var realEnc realEncoder
|
||||
|
||||
err := e.encode(&prepEnc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
|
||||
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
|
||||
}
|
||||
|
||||
realEnc.raw = make([]byte, prepEnc.length)
|
||||
realEnc.registry = metricRegistry
|
||||
err = e.encode(&realEnc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return realEnc.raw, nil
|
||||
}
|
||||
|
||||
// Decoder is the interface that wraps the basic Decode method.
|
||||
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
|
||||
type decoder interface {
|
||||
decode(pd packetDecoder) error
|
||||
}
|
||||
|
||||
type versionedDecoder interface {
|
||||
decode(pd packetDecoder, version int16) error
|
||||
}
|
||||
|
||||
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
|
||||
// interpreted using Kafka's encoding rules.
|
||||
func decode(buf []byte, in decoder) error {
|
||||
if buf == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
helper := realDecoder{raw: buf}
|
||||
err := in.decode(&helper)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helper.off != len(buf) {
|
||||
return PacketDecodingError{"invalid length"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
|
||||
if buf == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
helper := realDecoder{raw: buf}
|
||||
err := in.decode(&helper, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if helper.off != len(buf) {
|
||||
return PacketDecodingError{"invalid length"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
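Because encode, decode and versionedDecode are unexported, a round trip through them has to live inside package sarama, as the vendored tests above do. A minimal sketch of such a test, using a hypothetical request value, could read:

package sarama

import "testing"

func TestEncodeDecodeRoundTrip(t *testing.T) {
	req := &ConsumerMetadataRequest{ConsumerGroup: "example-group"}

	// encode walks the value twice: prepEncoder sizes the buffer, realEncoder fills it.
	buf, err := encode(req, nil)
	if err != nil {
		t.Fatal(err)
	}

	// versionedDecode replays the bytes into a fresh value and verifies that the
	// whole buffer was consumed.
	decoded := new(ConsumerMetadataRequest)
	if err := versionedDecode(buf, decoded, 0); err != nil {
		t.Fatal(err)
	}
	if decoded.ConsumerGroup != req.ConsumerGroup {
		t.Errorf("round trip mismatch: %q != %q", decoded.ConsumerGroup, req.ConsumerGroup)
	}
}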
197
vendor/src/github.com/Shopify/sarama/errors.go
vendored
Normal file
197
vendor/src/github.com/Shopify/sarama/errors.go
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
|
||||
// or otherwise failed to respond.
|
||||
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
|
||||
|
||||
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
|
||||
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
|
||||
|
||||
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
|
||||
// not contain the expected information.
|
||||
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
|
||||
|
||||
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
|
||||
// (meaning one outside of the range [0...numPartitions-1]).
|
||||
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
|
||||
|
||||
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
|
||||
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
|
||||
|
||||
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
|
||||
var ErrNotConnected = errors.New("kafka: broker not connected")
|
||||
|
||||
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
|
||||
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
|
||||
// of the message set.
|
||||
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
|
||||
|
||||
// ErrShuttingDown is returned when a producer receives a message during shutdown.
|
||||
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
|
||||
|
||||
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
|
||||
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
|
||||
|
||||
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
|
||||
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
|
||||
type PacketEncodingError struct {
|
||||
Info string
|
||||
}
|
||||
|
||||
func (err PacketEncodingError) Error() string {
|
||||
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
|
||||
}
|
||||
|
||||
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
|
||||
// This can be a bad CRC or length field, or any other invalid value.
|
||||
type PacketDecodingError struct {
|
||||
Info string
|
||||
}
|
||||
|
||||
func (err PacketDecodingError) Error() string {
|
||||
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
|
||||
}
|
||||
|
||||
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
|
||||
// when the specified configuration is invalid.
|
||||
type ConfigurationError string
|
||||
|
||||
func (err ConfigurationError) Error() string {
|
||||
return "kafka: invalid configuration (" + string(err) + ")"
|
||||
}
|
||||
|
||||
// KError is the type of error that can be returned directly by the Kafka broker.
|
||||
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
|
||||
type KError int16
|
||||
|
||||
// Numeric error codes returned by the Kafka server.
|
||||
const (
|
||||
ErrNoError KError = 0
|
||||
ErrUnknown KError = -1
|
||||
ErrOffsetOutOfRange KError = 1
|
||||
ErrInvalidMessage KError = 2
|
||||
ErrUnknownTopicOrPartition KError = 3
|
||||
ErrInvalidMessageSize KError = 4
|
||||
ErrLeaderNotAvailable KError = 5
|
||||
ErrNotLeaderForPartition KError = 6
|
||||
ErrRequestTimedOut KError = 7
|
||||
ErrBrokerNotAvailable KError = 8
|
||||
ErrReplicaNotAvailable KError = 9
|
||||
ErrMessageSizeTooLarge KError = 10
|
||||
ErrStaleControllerEpochCode KError = 11
|
||||
ErrOffsetMetadataTooLarge KError = 12
|
||||
ErrNetworkException KError = 13
|
||||
ErrOffsetsLoadInProgress KError = 14
|
||||
ErrConsumerCoordinatorNotAvailable KError = 15
|
||||
ErrNotCoordinatorForConsumer KError = 16
|
||||
ErrInvalidTopic KError = 17
|
||||
ErrMessageSetSizeTooLarge KError = 18
|
||||
ErrNotEnoughReplicas KError = 19
|
||||
ErrNotEnoughReplicasAfterAppend KError = 20
|
||||
ErrInvalidRequiredAcks KError = 21
|
||||
ErrIllegalGeneration KError = 22
|
||||
ErrInconsistentGroupProtocol KError = 23
|
||||
ErrInvalidGroupId KError = 24
|
||||
ErrUnknownMemberId KError = 25
|
||||
ErrInvalidSessionTimeout KError = 26
|
||||
ErrRebalanceInProgress KError = 27
|
||||
ErrInvalidCommitOffsetSize KError = 28
|
||||
ErrTopicAuthorizationFailed KError = 29
|
||||
ErrGroupAuthorizationFailed KError = 30
|
||||
ErrClusterAuthorizationFailed KError = 31
|
||||
ErrInvalidTimestamp KError = 32
|
||||
ErrUnsupportedSASLMechanism KError = 33
|
||||
ErrIllegalSASLState KError = 34
|
||||
ErrUnsupportedVersion KError = 35
|
||||
ErrUnsupportedForMessageFormat KError = 43
|
||||
)
|
||||
|
||||
func (err KError) Error() string {
|
||||
// Error messages stolen/adapted from
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
|
||||
switch err {
|
||||
case ErrNoError:
|
||||
return "kafka server: Not an error, why are you printing me?"
|
||||
case ErrUnknown:
|
||||
return "kafka server: Unexpected (unknown?) server error."
|
||||
case ErrOffsetOutOfRange:
|
||||
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
|
||||
case ErrInvalidMessage:
|
||||
return "kafka server: Message contents does not match its CRC."
|
||||
case ErrUnknownTopicOrPartition:
|
||||
return "kafka server: Request was for a topic or partition that does not exist on this broker."
|
||||
case ErrInvalidMessageSize:
|
||||
return "kafka server: The message has a negative size."
|
||||
case ErrLeaderNotAvailable:
|
||||
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
|
||||
case ErrNotLeaderForPartition:
|
||||
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
|
||||
case ErrRequestTimedOut:
|
||||
return "kafka server: Request exceeded the user-specified time limit in the request."
|
||||
case ErrBrokerNotAvailable:
|
||||
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
|
||||
case ErrReplicaNotAvailable:
|
||||
return "kafka server: Replica information not available, one or more brokers are down."
|
||||
case ErrMessageSizeTooLarge:
|
||||
return "kafka server: Message was too large, server rejected it to avoid allocation error."
|
||||
case ErrStaleControllerEpochCode:
|
||||
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
|
||||
case ErrOffsetMetadataTooLarge:
|
||||
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
|
||||
case ErrNetworkException:
|
||||
return "kafka server: The server disconnected before a response was received."
|
||||
case ErrOffsetsLoadInProgress:
|
||||
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
|
||||
case ErrConsumerCoordinatorNotAvailable:
|
||||
return "kafka server: Offset's topic has not yet been created."
|
||||
case ErrNotCoordinatorForConsumer:
|
||||
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
|
||||
case ErrInvalidTopic:
|
||||
return "kafka server: The request attempted to perform an operation on an invalid topic."
|
||||
case ErrMessageSetSizeTooLarge:
|
||||
return "kafka server: The request included message batch larger than the configured segment size on the server."
|
||||
case ErrNotEnoughReplicas:
|
||||
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
|
||||
case ErrNotEnoughReplicasAfterAppend:
|
||||
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
|
||||
case ErrInvalidRequiredAcks:
|
||||
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
|
||||
case ErrIllegalGeneration:
|
||||
return "kafka server: The provided generation id is not the current generation."
|
||||
case ErrInconsistentGroupProtocol:
|
||||
return "kafka server: The provider group protocol type is incompatible with the other members."
|
||||
case ErrInvalidGroupId:
|
||||
return "kafka server: The provided group id was empty."
|
||||
case ErrUnknownMemberId:
|
||||
return "kafka server: The provided member is not known in the current generation."
|
||||
case ErrInvalidSessionTimeout:
|
||||
return "kafka server: The provided session timeout is outside the allowed range."
|
||||
case ErrRebalanceInProgress:
|
||||
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
|
||||
case ErrInvalidCommitOffsetSize:
|
||||
return "kafka server: The provided commit metadata was too large."
|
||||
case ErrTopicAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to access this topic."
|
||||
case ErrGroupAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to access this group."
|
||||
case ErrClusterAuthorizationFailed:
|
||||
return "kafka server: The client is not authorized to send this request type."
|
||||
case ErrInvalidTimestamp:
|
||||
return "kafka server: The timestamp of the message is out of acceptable range."
|
||||
case ErrUnsupportedSASLMechanism:
|
||||
return "kafka server: The broker does not support the requested SASL mechanism."
|
||||
case ErrIllegalSASLState:
|
||||
return "kafka server: Request is not valid given the current SASL state."
|
||||
case ErrUnsupportedVersion:
|
||||
return "kafka server: The version of API is not supported."
|
||||
case ErrUnsupportedForMessageFormat:
|
||||
return "kafka server: The requested operation is not supported by the message format version."
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
|
||||
}
|
9
vendor/src/github.com/Shopify/sarama/examples/README.md
vendored
Normal file
9
vendor/src/github.com/Shopify/sarama/examples/README.md
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# Sarama examples
|
||||
|
||||
This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarams's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
|
||||
|
||||
In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
|
||||
|
||||
#### HTTP server
|
||||
|
||||
[http_server](./http_server) is a simple HTTP server uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
|
7
vendor/src/github.com/Shopify/sarama/examples/http_server/README.md
vendored
Normal file
7
vendor/src/github.com/Shopify/sarama/examples/http_server/README.md
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
# HTTP server example
|
||||
|
||||
This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and send a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background.
|
||||
|
||||
If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response, while the message is being produced in the background.
|
||||
|
||||
One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.
|
247
vendor/src/github.com/Shopify/sarama/examples/http_server/http_server.go
vendored
Normal file
247
vendor/src/github.com/Shopify/sarama/examples/http_server/http_server.go
vendored
Normal file
@ -0,0 +1,247 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Shopify/sarama"
|
||||
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("addr", ":8080", "The address to bind to")
|
||||
brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
|
||||
verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
|
||||
certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
|
||||
keyFile = flag.String("key", "", "The optional key file for client authentication")
|
||||
caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
|
||||
verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *verbose {
|
||||
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
|
||||
}
|
||||
|
||||
if *brokers == "" {
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
brokerList := strings.Split(*brokers, ",")
|
||||
log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
|
||||
|
||||
server := &Server{
|
||||
DataCollector: newDataCollector(brokerList),
|
||||
AccessLogProducer: newAccessLogProducer(brokerList),
|
||||
}
|
||||
defer func() {
|
||||
if err := server.Close(); err != nil {
|
||||
log.Println("Failed to close server", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Fatal(server.Run(*addr))
|
||||
}
|
||||
|
||||
func createTlsConfiguration() (t *tls.Config) {
|
||||
if *certFile != "" && *keyFile != "" && *caFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
caCert, err := ioutil.ReadFile(*caFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
|
||||
t = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: caCertPool,
|
||||
InsecureSkipVerify: *verifySsl,
|
||||
}
|
||||
}
|
||||
// will be nil by default if nothing is provided
|
||||
return t
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
DataCollector sarama.SyncProducer
|
||||
AccessLogProducer sarama.AsyncProducer
|
||||
}
|
||||
|
||||
func (s *Server) Close() error {
|
||||
if err := s.DataCollector.Close(); err != nil {
|
||||
log.Println("Failed to shut down data collector cleanly", err)
|
||||
}
|
||||
|
||||
if err := s.AccessLogProducer.Close(); err != nil {
|
||||
log.Println("Failed to shut down access log producer cleanly", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Handler() http.Handler {
|
||||
return s.withAccessLog(s.collectQueryStringData())
|
||||
}
|
||||
|
||||
func (s *Server) Run(addr string) error {
|
||||
httpServer := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.Handler(),
|
||||
}
|
||||
|
||||
log.Printf("Listening for requests on %s...\n", addr)
|
||||
return httpServer.ListenAndServe()
|
||||
}
|
||||
|
||||
func (s *Server) collectQueryStringData() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// We are not setting a message key, which means that all messages will
|
||||
// be distributed randomly over the different partitions.
|
||||
partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
|
||||
Topic: "important",
|
||||
Value: sarama.StringEncoder(r.URL.RawQuery),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
fmt.Fprintf(w, "Failed to store your data:, %s", err)
|
||||
} else {
|
||||
// The tuple (topic, partition, offset) can be used as a unique identifier
|
||||
// for a message in a Kafka cluster.
|
||||
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type accessLogEntry struct {
|
||||
Method string `json:"method"`
|
||||
Host string `json:"host"`
|
||||
Path string `json:"path"`
|
||||
IP string `json:"ip"`
|
||||
ResponseTime float64 `json:"response_time"`
|
||||
|
||||
encoded []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) ensureEncoded() {
|
||||
if ale.encoded == nil && ale.err == nil {
|
||||
ale.encoded, ale.err = json.Marshal(ale)
|
||||
}
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) Length() int {
|
||||
ale.ensureEncoded()
|
||||
return len(ale.encoded)
|
||||
}
|
||||
|
||||
func (ale *accessLogEntry) Encode() ([]byte, error) {
|
||||
ale.ensureEncoded()
|
||||
return ale.encoded, ale.err
|
||||
}
|
||||
|
||||
func (s *Server) withAccessLog(next http.Handler) http.Handler {
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
started := time.Now()
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
|
||||
entry := &accessLogEntry{
|
||||
Method: r.Method,
|
||||
Host: r.Host,
|
||||
Path: r.RequestURI,
|
||||
IP: r.RemoteAddr,
|
||||
ResponseTime: float64(time.Since(started)) / float64(time.Second),
|
||||
}
|
||||
|
||||
// We will use the client's IP address as key. This will cause
|
||||
// all the access log entries of the same IP address to end up
|
||||
// on the same partition.
|
||||
s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
|
||||
Topic: "access_log",
|
||||
Key: sarama.StringEncoder(r.RemoteAddr),
|
||||
Value: entry,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func newDataCollector(brokerList []string) sarama.SyncProducer {
|
||||
|
||||
// For the data collector, we are looking for strong consistency semantics.
|
||||
// Because we don't change the flush settings, sarama will try to produce messages
|
||||
// as fast as possible to keep latency low.
|
||||
config := sarama.NewConfig()
|
||||
config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
|
||||
config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
|
||||
config.Producer.Return.Successes = true
|
||||
tlsConfig := createTlsConfiguration()
|
||||
if tlsConfig != nil {
|
||||
config.Net.TLS.Config = tlsConfig
|
||||
config.Net.TLS.Enable = true
|
||||
}
|
||||
|
||||
// On the broker side, you may want to change the following settings to get
|
||||
// stronger consistency guarantees:
|
||||
// - For your broker, set `unclean.leader.election.enable` to false
|
||||
// - For the topic, you could increase `min.insync.replicas`.
|
||||
|
||||
producer, err := sarama.NewSyncProducer(brokerList, config)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to start Sarama producer:", err)
|
||||
}
|
||||
|
||||
return producer
|
||||
}
|
||||
|
||||
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
|
||||
|
||||
// For the access log, we are looking for AP semantics, with high throughput.
|
||||
// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
|
||||
config := sarama.NewConfig()
|
||||
tlsConfig := createTlsConfiguration()
|
||||
if tlsConfig != nil {
|
||||
config.Net.TLS.Enable = true
|
||||
config.Net.TLS.Config = tlsConfig
|
||||
}
|
||||
config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
|
||||
config.Producer.Compression = sarama.CompressionSnappy // Compress messages
|
||||
config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
|
||||
|
||||
producer, err := sarama.NewAsyncProducer(brokerList, config)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to start Sarama producer:", err)
|
||||
}
|
||||
|
||||
// We will just log to STDOUT if we're not able to produce messages.
|
||||
// Note: messages will only be returned here after all retry attempts are exhausted.
|
||||
go func() {
|
||||
for err := range producer.Errors() {
|
||||
log.Println("Failed to write access log entry:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return producer
|
||||
}
|
109
vendor/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go
vendored
Normal file
109
vendor/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/Shopify/sarama/mocks"
|
||||
)
|
||||
|
||||
// In normal operation, we expect one access log entry,
|
||||
// and one data collector entry. Let's assume both will succeed.
|
||||
// We should return a HTTP 200 status.
|
||||
func TestCollectSuccessfully(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
dataCollectorMock.ExpectSendMessageAndSucceed()
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
// Now, use dependency injection to use the mocks.
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
|
||||
// The Server's Close call is important; it will call Close on
|
||||
// the two mock producers, which will then validate whether all
|
||||
// expectations are resolved.
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 200 {
|
||||
t.Errorf("Expected HTTP status 200, found %d", res.Code)
|
||||
}
|
||||
|
||||
if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
|
||||
t.Error("Unexpected response body", res.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// Now, let's see if we handle the case of not being able to produce
|
||||
// to the data collector properly. In this case we should return a 500 status.
|
||||
func TestCollectionFailure(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 500 {
|
||||
t.Errorf("Expected HTTP status 500, found %d", res.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// We don't expect any data collector calls because the path is wrong,
|
||||
// so we are not setting any expectations on the dataCollectorMock. It
|
||||
// will still generate an access log entry though.
|
||||
func TestWrongPath(t *testing.T) {
|
||||
dataCollectorMock := mocks.NewSyncProducer(t, nil)
|
||||
|
||||
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
|
||||
accessLogProducerMock.ExpectInputAndSucceed()
|
||||
|
||||
s := &Server{
|
||||
DataCollector: dataCollectorMock,
|
||||
AccessLogProducer: accessLogProducerMock,
|
||||
}
|
||||
defer safeClose(t, s)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := httptest.NewRecorder()
|
||||
|
||||
s.Handler().ServeHTTP(res, req)
|
||||
|
||||
if res.Code != 404 {
|
||||
t.Errorf("Expected HTTP status 404, found %d", res.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func safeClose(t *testing.T, o io.Closer) {
|
||||
if err := o.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
136
vendor/src/github.com/Shopify/sarama/fetch_request.go
vendored
Normal file
136
vendor/src/github.com/Shopify/sarama/fetch_request.go
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
package sarama
|
||||
|
||||
type fetchRequestBlock struct {
|
||||
fetchOffset int64
|
||||
maxBytes int32
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
|
||||
pe.putInt64(b.fetchOffset)
|
||||
pe.putInt32(b.maxBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
|
||||
if b.fetchOffset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.maxBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FetchRequest struct {
|
||||
MaxWaitTime int32
|
||||
MinBytes int32
|
||||
Version int16
|
||||
blocks map[string]map[int32]*fetchRequestBlock
|
||||
}
|
||||
|
||||
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt32(-1) // replica ID is always -1 for clients
|
||||
pe.putInt32(r.MaxWaitTime)
|
||||
pe.putInt32(r.MinBytes)
|
||||
err = pe.putArrayLength(len(r.blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, blocks := range r.blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range blocks {
|
||||
pe.putInt32(partition)
|
||||
err = block.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
if _, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.MinBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fetchBlock := &fetchRequestBlock{}
|
||||
if err = fetchBlock.decode(pd); err != nil {
|
||||
return nil
|
||||
}
|
||||
r.blocks[topic][partition] = fetchBlock
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchRequest) key() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *FetchRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
tmp := new(fetchRequestBlock)
|
||||
tmp.maxBytes = maxBytes
|
||||
tmp.fetchOffset = fetchOffset
|
||||
|
||||
r.blocks[topic][partitionID] = tmp
|
||||
}
|
34
vendor/src/github.com/Shopify/sarama/fetch_request_test.go
vendored
Normal file
34
vendor/src/github.com/Shopify/sarama/fetch_request_test.go
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
fetchRequestNoBlocks = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
fetchRequestWithProperties = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
fetchRequestOneBlock = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
|
||||
)
|
||||
|
||||
func TestFetchRequest(t *testing.T) {
|
||||
request := new(FetchRequest)
|
||||
testRequest(t, "no blocks", request, fetchRequestNoBlocks)
|
||||
|
||||
request.MaxWaitTime = 0x20
|
||||
request.MinBytes = 0xEF
|
||||
testRequest(t, "with properties", request, fetchRequestWithProperties)
|
||||
|
||||
request.MaxWaitTime = 0
|
||||
request.MinBytes = 0
|
||||
request.AddBlock("topic", 0x12, 0x34, 0x56)
|
||||
testRequest(t, "one block", request, fetchRequestOneBlock)
|
||||
}
|
210
vendor/src/github.com/Shopify/sarama/fetch_response.go
vendored
Normal file
210
vendor/src/github.com/Shopify/sarama/fetch_response.go
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
package sarama
|
||||
|
||||
import "time"
|
||||
|
||||
type FetchResponseBlock struct {
|
||||
Err KError
|
||||
HighWaterMarkOffset int64
|
||||
MsgSet MessageSet
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
b.HighWaterMarkOffset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgSetSize, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgSetDecoder, err := pd.getSubset(int(msgSetSize))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = (&b.MsgSet).decode(msgSetDecoder)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
pe.putInt64(b.HighWaterMarkOffset)
|
||||
|
||||
pe.push(&lengthField{})
|
||||
err = b.MsgSet.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
type FetchResponse struct {
|
||||
Blocks map[string]map[int32]*FetchResponseBlock
|
||||
ThrottleTime time.Duration
|
||||
Version int16 // v1 requires 0.9+, v2 requires 0.10+
|
||||
}
|
||||
|
||||
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.Version >= 1 {
|
||||
throttle, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
|
||||
}
|
||||
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(FetchResponseBlock)
|
||||
err = block.decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
|
||||
if r.Version >= 1 {
|
||||
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(r.Blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range r.Blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for id, block := range partitions {
|
||||
pe.putInt32(id)
|
||||
err = block.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchResponse) key() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *FetchResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
|
||||
}
|
||||
partitions, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
partitions = make(map[int32]*FetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
frb, ok := partitions[partition]
|
||||
if !ok {
|
||||
frb = new(FetchResponseBlock)
|
||||
partitions[partition] = frb
|
||||
}
|
||||
frb.Err = err
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
|
||||
}
|
||||
partitions, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
partitions = make(map[int32]*FetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
frb, ok := partitions[partition]
|
||||
if !ok {
|
||||
frb = new(FetchResponseBlock)
|
||||
partitions[partition] = frb
|
||||
}
|
||||
var kb []byte
|
||||
var vb []byte
|
||||
if key != nil {
|
||||
kb, _ = key.Encode()
|
||||
}
|
||||
if value != nil {
|
||||
vb, _ = value.Encode()
|
||||
}
|
||||
msg := &Message{Key: kb, Value: vb}
|
||||
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
|
||||
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
|
||||
}
|
84
vendor/src/github.com/Shopify/sarama/fetch_response_test.go
vendored
Normal file
84
vendor/src/github.com/Shopify/sarama/fetch_response_test.go
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyFetchResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
oneMessageFetchResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x05,
|
||||
0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
|
||||
0x00, 0x00, 0x00, 0x1C,
|
||||
// messageSet
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x10,
|
||||
// message
|
||||
0x23, 0x96, 0x4a, 0xf7, // CRC
|
||||
0x00,
|
||||
0x00,
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
|
||||
)
|
||||
|
||||
func TestEmptyFetchResponse(t *testing.T) {
|
||||
response := FetchResponse{}
|
||||
testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced topic blocks where there were none.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestOneMessageFetchResponse(t *testing.T) {
|
||||
response := FetchResponse{}
|
||||
testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of topic blocks.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["topic"]) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
|
||||
}
|
||||
|
||||
block := response.GetBlock("topic", 5)
|
||||
if block == nil {
|
||||
t.Fatal("GetBlock didn't return block.")
|
||||
}
|
||||
if block.Err != ErrOffsetOutOfRange {
|
||||
t.Error("Decoding didn't produce correct error code.")
|
||||
}
|
||||
if block.HighWaterMarkOffset != 0x10101010 {
|
||||
t.Error("Decoding didn't produce correct high water mark offset.")
|
||||
}
|
||||
if block.MsgSet.PartialTrailingMessage {
|
||||
t.Error("Decoding detected a partial trailing message where there wasn't one.")
|
||||
}
|
||||
|
||||
if len(block.MsgSet.Messages) != 1 {
|
||||
t.Fatal("Decoding produced incorrect number of messages.")
|
||||
}
|
||||
msgBlock := block.MsgSet.Messages[0]
|
||||
if msgBlock.Offset != 0x550000 {
|
||||
t.Error("Decoding produced incorrect message offset.")
|
||||
}
|
||||
msg := msgBlock.Msg
|
||||
if msg.Codec != CompressionNone {
|
||||
t.Error("Decoding produced incorrect message compression.")
|
||||
}
|
||||
if msg.Key != nil {
|
||||
t.Error("Decoding produced message key where there was none.")
|
||||
}
|
||||
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
|
||||
t.Error("Decoding produced incorrect message value.")
|
||||
}
|
||||
}
|
90
vendor/src/github.com/Shopify/sarama/functional_client_test.go
vendored
Normal file
90
vendor/src/github.com/Shopify/sarama/functional_client_test.go
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFuncConnectionFailure(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
Proxies["kafka1"].Enabled = false
|
||||
SaveProxy(t, "kafka1")
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
|
||||
_, err := NewClient([]string{kafkaBrokers[0]}, config)
|
||||
if err != ErrOutOfBrokers {
|
||||
t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuncClientMetadata(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Metadata.Retry.Backoff = 10 * time.Millisecond
|
||||
client, err := NewClient(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
|
||||
}
|
||||
|
||||
partitions, err := client.Partitions("test.4")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(partitions) != 4 {
|
||||
t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions)
|
||||
}
|
||||
|
||||
partitions, err = client.Partitions("test.1")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(partitions) != 1 {
|
||||
t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions)
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func TestFuncClientCoordinator(t *testing.T) {
|
||||
checkKafkaVersion(t, "0.8.2")
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
client, err := NewClient(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if connected, err := broker.Connected(); !connected || err != nil {
|
||||
t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr())
|
||||
}
|
||||
}
|
||||
|
||||
safeClose(t, client)
|
||||
}
|
61
vendor/src/github.com/Shopify/sarama/functional_consumer_test.go
vendored
Normal file
61
vendor/src/github.com/Shopify/sarama/functional_consumer_test.go
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFuncConsumerOffsetOutOfRange(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
consumer, err := NewConsumer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange {
|
||||
t.Error("Expected ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange {
|
||||
t.Error("Expected ErrOffsetOutOfRange, got:", err)
|
||||
}
|
||||
|
||||
safeClose(t, consumer)
|
||||
}
|
||||
|
||||
func TestConsumerHighWaterMarkOffset(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
p, err := NewSyncProducer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer safeClose(t, p)
|
||||
|
||||
_, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c, err := NewConsumer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer safeClose(t, c)
|
||||
|
||||
pc, err := c.ConsumePartition("test.1", 0, OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-pc.Messages()
|
||||
|
||||
if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 {
|
||||
t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo)
|
||||
}
|
||||
|
||||
safeClose(t, pc)
|
||||
}
|
47
vendor/src/github.com/Shopify/sarama/functional_offset_manager_test.go
vendored
Normal file
47
vendor/src/github.com/Shopify/sarama/functional_offset_manager_test.go
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFuncOffsetManager(t *testing.T) {
|
||||
checkKafkaVersion(t, "0.8.2")
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
client, err := NewClient(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pom1, err := offsetManager.ManagePartition("test.1", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pom1.MarkOffset(10, "test metadata")
|
||||
safeClose(t, pom1)
|
||||
|
||||
pom2, err := offsetManager.ManagePartition("test.1", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
offset, metadata := pom2.NextOffset()
|
||||
|
||||
if offset != 10 {
|
||||
t.Errorf("Expected the next offset to be 10, found %d.", offset)
|
||||
}
|
||||
if metadata != "test metadata" {
|
||||
t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata)
|
||||
}
|
||||
|
||||
safeClose(t, pom2)
|
||||
safeClose(t, offsetManager)
|
||||
safeClose(t, client)
|
||||
}
|
323
vendor/src/github.com/Shopify/sarama/functional_producer_test.go
vendored
Normal file
323
vendor/src/github.com/Shopify/sarama/functional_producer_test.go
vendored
Normal file
@ -0,0 +1,323 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
toxiproxy "github.com/Shopify/toxiproxy/client"
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
const TestBatchSize = 1000
|
||||
|
||||
func TestFuncProducing(t *testing.T) {
|
||||
config := NewConfig()
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingGzip(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionGZIP
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingSnappy(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Compression = CompressionSnappy
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingNoResponse(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.RequiredAcks = NoResponse
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncProducingFlushing(t *testing.T) {
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = TestBatchSize / 8
|
||||
config.Producer.Flush.Frequency = 250 * time.Millisecond
|
||||
testProducingMessages(t, config)
|
||||
}
|
||||
|
||||
func TestFuncMultiPartitionProduce(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
config := NewConfig()
|
||||
config.ChannelBufferSize = 20
|
||||
config.Producer.Flush.Frequency = 50 * time.Millisecond
|
||||
config.Producer.Flush.Messages = 200
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewSyncProducer(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(TestBatchSize)
|
||||
|
||||
for i := 1; i <= TestBatchSize; i++ {
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
|
||||
if _, _, err := producer.SendMessage(msg); err != nil {
|
||||
t.Error(i, err)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if err := producer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuncProducingToInvalidTopic(t *testing.T) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
producer, err := NewSyncProducer(kafkaBrokers, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
|
||||
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
|
||||
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
|
||||
}
|
||||
|
||||
safeClose(t, producer)
|
||||
}
|
||||
|
||||
func testProducingMessages(t *testing.T, config *Config) {
|
||||
setupFunctionalTest(t)
|
||||
defer teardownFunctionalTest(t)
|
||||
|
||||
// Configure some latency in order to properly validate the request latency metric
|
||||
for _, proxy := range Proxies {
|
||||
if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil {
|
||||
t.Fatal("Unable to configure latency toxicity", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.Producer.Return.Successes = true
|
||||
config.Consumer.Return.Errors = true
|
||||
|
||||
client, err := NewClient(kafkaBrokers, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Keep in mind the current offset
|
||||
initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
producer, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedResponses := TestBatchSize
|
||||
for i := 1; i <= TestBatchSize; {
|
||||
msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
|
||||
select {
|
||||
case producer.Input() <- msg:
|
||||
i++
|
||||
case ret := <-producer.Errors():
|
||||
t.Fatal(ret.Err)
|
||||
case <-producer.Successes():
|
||||
expectedResponses--
|
||||
}
|
||||
}
|
||||
for expectedResponses > 0 {
|
||||
select {
|
||||
case ret := <-producer.Errors():
|
||||
t.Fatal(ret.Err)
|
||||
case <-producer.Successes():
|
||||
expectedResponses--
|
||||
}
|
||||
}
|
||||
safeClose(t, producer)
|
||||
|
||||
// Validate producer metrics before using the consumer minus the offset request
|
||||
validateMetrics(t, client)
|
||||
|
||||
master, err := NewConsumerFromClient(client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
consumer, err := master.ConsumePartition("test.1", 0, initialOffset)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 1; i <= TestBatchSize; i++ {
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("Not received any more events in the last 10 seconds.")
|
||||
|
||||
case err := <-consumer.Errors():
|
||||
t.Error(err)
|
||||
|
||||
case message := <-consumer.Messages():
|
||||
if string(message.Value) != fmt.Sprintf("testing %d", i) {
|
||||
t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
safeClose(t, consumer)
|
||||
safeClose(t, client)
|
||||
}
|
||||
|
||||
func validateMetrics(t *testing.T, client Client) {
|
||||
// Get the broker used by test1 topic
|
||||
var broker *Broker
|
||||
if partitions, err := client.Partitions("test.1"); err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
for _, partition := range partitions {
|
||||
if b, err := client.Leader("test.1", partition); err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
if broker != nil && b != broker {
|
||||
t.Fatal("Expected only one broker, got at least 2")
|
||||
}
|
||||
broker = b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metricValidators := newMetricValidators()
|
||||
noResponse := client.Config().Producer.RequiredAcks == NoResponse
|
||||
compressionEnabled := client.Config().Producer.Compression != CompressionNone
|
||||
|
||||
// We are adding 10ms of latency to all requests with toxiproxy
|
||||
minRequestLatencyInMs := 10
|
||||
if noResponse {
|
||||
// but when we do not wait for a response it can be less than 1ms
|
||||
minRequestLatencyInMs = 0
|
||||
}
|
||||
|
||||
// We read at least 1 byte from the broker
|
||||
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1))
|
||||
// in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)
|
||||
metricValidators.register(minCountMeterValidator("request-rate", 3))
|
||||
metricValidators.register(minCountHistogramValidator("request-size", 3))
|
||||
metricValidators.register(minValHistogramValidator("request-size", 1))
|
||||
metricValidators.register(minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
|
||||
// and at least 2 requests to the registered broker (offset + produces)
|
||||
metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
|
||||
|
||||
// We send at least 1 batch
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1))
|
||||
if compressionEnabled {
|
||||
// We record compression ratios between [0.50,-10.00] (50-1000 with a histogram) for at least one "fake" record
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000))
|
||||
} else {
|
||||
// We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record
|
||||
metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100))
|
||||
}
|
||||
|
||||
// We send exactly TestBatchSize messages
|
||||
metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize))
|
||||
// We send at least one record per request
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1))
|
||||
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1))
|
||||
|
||||
// We receive at least 1 byte from the broker
|
||||
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1))
|
||||
if noResponse {
|
||||
// in exactly 2 global responses (metadata + offset)
|
||||
metricValidators.register(countMeterValidator("response-rate", 2))
|
||||
metricValidators.register(minCountHistogramValidator("response-size", 2))
|
||||
metricValidators.register(minValHistogramValidator("response-size", 1))
|
||||
// and exactly 1 offset response for the registered broker
|
||||
metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
|
||||
} else {
|
||||
// in at least 3 global responses (metadata + offset + produces)
|
||||
metricValidators.register(minCountMeterValidator("response-rate", 3))
|
||||
metricValidators.register(minCountHistogramValidator("response-size", 3))
|
||||
metricValidators.register(minValHistogramValidator("response-size", 1))
|
||||
// and at least 2 for the registered broker
|
||||
metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2))
|
||||
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2))
|
||||
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
|
||||
}
|
||||
|
||||
// Run the validators
|
||||
metricValidators.run(t, client.Config().MetricRegistry)
|
||||
}
|
||||
|
||||
// Benchmarks
|
||||
|
||||
func BenchmarkProducerSmall(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128)))
|
||||
}
|
||||
func BenchmarkProducerMedium(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024)))
|
||||
}
|
||||
func BenchmarkProducerLarge(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192)))
|
||||
}
|
||||
func BenchmarkProducerSmallSinglePartition(b *testing.B) {
|
||||
benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128)))
|
||||
}
|
||||
func BenchmarkProducerMediumSnappy(b *testing.B) {
|
||||
conf := NewConfig()
|
||||
conf.Producer.Compression = CompressionSnappy
|
||||
benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024)))
|
||||
}
|
||||
|
||||
func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {
|
||||
setupFunctionalTest(b)
|
||||
defer teardownFunctionalTest(b)
|
||||
|
||||
metricsDisable := os.Getenv("METRICS_DISABLE")
|
||||
if metricsDisable != "" {
|
||||
previousUseNilMetrics := metrics.UseNilMetrics
|
||||
Logger.Println("Disabling metrics using no-op implementation")
|
||||
metrics.UseNilMetrics = true
|
||||
// Restore previous setting
|
||||
defer func() {
|
||||
metrics.UseNilMetrics = previousUseNilMetrics
|
||||
}()
|
||||
}
|
||||
|
||||
producer, err := NewAsyncProducer(kafkaBrokers, conf)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 1; i <= b.N; {
|
||||
msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value}
|
||||
select {
|
||||
case producer.Input() <- msg:
|
||||
i++
|
||||
case ret := <-producer.Errors():
|
||||
b.Fatal(ret.Err)
|
||||
}
|
||||
}
|
||||
safeClose(b, producer)
|
||||
}
|
148
vendor/src/github.com/Shopify/sarama/functional_test.go
vendored
Normal file
148
vendor/src/github.com/Shopify/sarama/functional_test.go
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
toxiproxy "github.com/Shopify/toxiproxy/client"
|
||||
)
|
||||
|
||||
const (
|
||||
VagrantToxiproxy = "http://192.168.100.67:8474"
|
||||
VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095"
|
||||
VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
|
||||
)
|
||||
|
||||
var (
|
||||
kafkaAvailable, kafkaRequired bool
|
||||
kafkaBrokers []string
|
||||
|
||||
proxyClient *toxiproxy.Client
|
||||
Proxies map[string]*toxiproxy.Proxy
|
||||
ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"}
|
||||
KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"}
|
||||
)
|
||||
|
||||
func init() {
|
||||
if os.Getenv("DEBUG") == "true" {
|
||||
Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
|
||||
}
|
||||
|
||||
seed := time.Now().UTC().UnixNano()
|
||||
if tmp := os.Getenv("TEST_SEED"); tmp != "" {
|
||||
seed, _ = strconv.ParseInt(tmp, 0, 64)
|
||||
}
|
||||
Logger.Println("Using random seed:", seed)
|
||||
rand.Seed(seed)
|
||||
|
||||
proxyAddr := os.Getenv("TOXIPROXY_ADDR")
|
||||
if proxyAddr == "" {
|
||||
proxyAddr = VagrantToxiproxy
|
||||
}
|
||||
proxyClient = toxiproxy.NewClient(proxyAddr)
|
||||
|
||||
kafkaPeers := os.Getenv("KAFKA_PEERS")
|
||||
if kafkaPeers == "" {
|
||||
kafkaPeers = VagrantKafkaPeers
|
||||
}
|
||||
kafkaBrokers = strings.Split(kafkaPeers, ",")
|
||||
|
||||
if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil {
|
||||
if err = c.Close(); err == nil {
|
||||
kafkaAvailable = true
|
||||
}
|
||||
}
|
||||
|
||||
kafkaRequired = os.Getenv("CI") != ""
|
||||
}
|
||||
|
||||
func checkKafkaAvailability(t testing.TB) {
|
||||
if !kafkaAvailable {
|
||||
if kafkaRequired {
|
||||
t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
|
||||
} else {
|
||||
t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkKafkaVersion(t testing.TB, requiredVersion string) {
|
||||
kafkaVersion := os.Getenv("KAFKA_VERSION")
|
||||
if kafkaVersion == "" {
|
||||
t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion)
|
||||
} else {
|
||||
available := parseKafkaVersion(kafkaVersion)
|
||||
required := parseKafkaVersion(requiredVersion)
|
||||
if !available.satisfies(required) {
|
||||
t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func resetProxies(t testing.TB) {
|
||||
if err := proxyClient.ResetState(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
Proxies = nil
|
||||
}
|
||||
|
||||
func fetchProxies(t testing.TB) {
|
||||
var err error
|
||||
Proxies, err = proxyClient.Proxies()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func SaveProxy(t *testing.T, px string) {
|
||||
if err := Proxies[px].Save(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func setupFunctionalTest(t testing.TB) {
|
||||
checkKafkaAvailability(t)
|
||||
resetProxies(t)
|
||||
fetchProxies(t)
|
||||
}
|
||||
|
||||
func teardownFunctionalTest(t testing.TB) {
|
||||
resetProxies(t)
|
||||
}
|
||||
|
||||
type kafkaVersion []int
|
||||
|
||||
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
|
||||
var ov int
|
||||
for index, v := range kv {
|
||||
if len(other) <= index {
|
||||
ov = 0
|
||||
} else {
|
||||
ov = other[index]
|
||||
}
|
||||
|
||||
if v < ov {
|
||||
return false
|
||||
} else if v > ov {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func parseKafkaVersion(version string) kafkaVersion {
|
||||
numbers := strings.Split(version, ".")
|
||||
result := make(kafkaVersion, 0, len(numbers))
|
||||
for _, number := range numbers {
|
||||
nr, _ := strconv.Atoi(number)
|
||||
result = append(result, nr)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
47
vendor/src/github.com/Shopify/sarama/heartbeat_request.go
vendored
Normal file
47
vendor/src/github.com/Shopify/sarama/heartbeat_request.go
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
package sarama
|
||||
|
||||
type HeartbeatRequest struct {
|
||||
GroupId string
|
||||
GenerationId int32
|
||||
MemberId string
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt32(r.GenerationId)
|
||||
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.GenerationId, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) key() int16 {
|
||||
return 12
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
21
vendor/src/github.com/Shopify/sarama/heartbeat_request_test.go
vendored
Normal file
21
vendor/src/github.com/Shopify/sarama/heartbeat_request_test.go
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package sarama

import "testing"

var (
	basicHeartbeatRequest = []byte{
		0, 3, 'f', 'o', 'o', // Group ID
		0x00, 0x01, 0x02, 0x03, // Generation ID
		0, 3, 'b', 'a', 'z', // Member ID
	}
)

func TestHeartbeatRequest(t *testing.T) {
	var request *HeartbeatRequest

	request = new(HeartbeatRequest)
	request.GroupId = "foo"
	request.GenerationId = 66051
	request.MemberId = "baz"
	testRequest(t, "basic", request, basicHeartbeatRequest)
}
32
vendor/src/github.com/Shopify/sarama/heartbeat_response.go
vendored
Normal file
32
vendor/src/github.com/Shopify/sarama/heartbeat_response.go
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
package sarama
|
||||
|
||||
type HeartbeatResponse struct {
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
|
||||
if kerr, err := pd.getInt16(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
r.Err = KError(kerr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) key() int16 {
|
||||
return 12
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
18
vendor/src/github.com/Shopify/sarama/heartbeat_response_test.go
vendored
Normal file
18
vendor/src/github.com/Shopify/sarama/heartbeat_response_test.go
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
heartbeatResponseNoError = []byte{
|
||||
0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestHeartbeatResponse(t *testing.T) {
|
||||
var response *HeartbeatResponse
|
||||
|
||||
response = new(HeartbeatResponse)
|
||||
testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding error failed: no error expected but found", response.Err)
|
||||
}
|
||||
}
|
108
vendor/src/github.com/Shopify/sarama/join_group_request.go
vendored
Normal file
108
vendor/src/github.com/Shopify/sarama/join_group_request.go
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package sarama
|
||||
|
||||
type JoinGroupRequest struct {
|
||||
GroupId string
|
||||
SessionTimeout int32
|
||||
MemberId string
|
||||
ProtocolType string
|
||||
GroupProtocols map[string][]byte
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putInt32(r.SessionTimeout)
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.ProtocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
|
||||
return err
|
||||
}
|
||||
for name, metadata := range r.GroupProtocols {
|
||||
if err := pe.putString(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putBytes(metadata); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.SessionTimeout, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.ProtocolType, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.GroupProtocols = make(map[string][]byte)
|
||||
for i := 0; i < n; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
metadata, err := pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.GroupProtocols[name] = metadata
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) key() int16 {
|
||||
return 11
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
|
||||
if r.GroupProtocols == nil {
|
||||
r.GroupProtocols = make(map[string][]byte)
|
||||
}
|
||||
|
||||
r.GroupProtocols[name] = metadata
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
|
||||
bin, err := encode(metadata, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.AddGroupProtocol(name, bin)
|
||||
return nil
|
||||
}
|
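As a hedged usage sketch (the group id, timeout and metadata bytes here are made-up examples), the exported fields and helpers above can be used to assemble a join request:

	req := new(JoinGroupRequest)
	req.GroupId = "example-group"  // hypothetical group id
	req.SessionTimeout = 30000     // milliseconds
	req.ProtocolType = "consumer"
	req.AddGroupProtocol("range", []byte{0x01, 0x02}) // opaque assignment-strategy metadata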
41
vendor/src/github.com/Shopify/sarama/join_group_request_test.go
vendored
Normal file
41
vendor/src/github.com/Shopify/sarama/join_group_request_test.go
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
joinGroupRequestNoProtocols = []byte{
|
||||
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
|
||||
0, 0, 0, 100, // Session timeout
|
||||
0, 0, // Member ID
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
|
||||
0, 0, 0, 0, // 0 group protocols
|
||||
}
|
||||
|
||||
joinGroupRequestOneProtocol = []byte{
|
||||
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
|
||||
0, 0, 0, 100, // Session timeout
|
||||
0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
|
||||
0, 0, 0, 1, // 1 group protocol
|
||||
0, 3, 'o', 'n', 'e', // Protocol name
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
|
||||
}
|
||||
)
|
||||
|
||||
func TestJoinGroupRequest(t *testing.T) {
|
||||
var request *JoinGroupRequest
|
||||
|
||||
request = new(JoinGroupRequest)
|
||||
request.GroupId = "TestGroup"
|
||||
request.SessionTimeout = 100
|
||||
request.ProtocolType = "consumer"
|
||||
testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
|
||||
|
||||
request = new(JoinGroupRequest)
|
||||
request.GroupId = "TestGroup"
|
||||
request.SessionTimeout = 100
|
||||
request.MemberId = "OneProtocol"
|
||||
request.ProtocolType = "consumer"
|
||||
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
|
||||
testRequest(t, "one protocol", request, joinGroupRequestOneProtocol)
|
||||
}
|
114
vendor/src/github.com/Shopify/sarama/join_group_response.go
vendored
Normal file
114
vendor/src/github.com/Shopify/sarama/join_group_response.go
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
package sarama
|
||||
|
||||
type JoinGroupResponse struct {
|
||||
Err KError
|
||||
GenerationId int32
|
||||
GroupProtocol string
|
||||
LeaderId string
|
||||
MemberId string
|
||||
Members map[string][]byte
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
|
||||
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
|
||||
for id, bin := range r.Members {
|
||||
meta := new(ConsumerGroupMemberMetadata)
|
||||
if err := decode(bin, meta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
members[id] = *meta
|
||||
}
|
||||
return members, nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
pe.putInt32(r.GenerationId)
|
||||
|
||||
if err := pe.putString(r.GroupProtocol); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.LeaderId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.Members)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for memberId, memberMetadata := range r.Members {
|
||||
if err := pe.putString(memberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putBytes(memberMetadata); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
if kerr, err := pd.getInt16(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
r.Err = KError(kerr)
|
||||
}
|
||||
|
||||
if r.GenerationId, err = pd.getInt32(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.GroupProtocol, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.LeaderId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Members = make(map[string][]byte)
|
||||
for i := 0; i < n; i++ {
|
||||
memberId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
memberMetadata, err := pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Members[memberId] = memberMetadata
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) key() int16 {
|
||||
return 11
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
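A brief sketch of how a group leader might use GetMembers after a successful join; joinResp is assumed to come from an earlier JoinGroup round trip:

	// joinResp is assumed to be a *JoinGroupResponse with Err == ErrNoError
	members, err := joinResp.GetMembers()
	if err != nil {
		return err // a member sent metadata that could not be decoded
	}
	for memberId, meta := range members {
		_ = memberId // one decoded ConsumerGroupMemberMetadata per member
		_ = meta
	}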
98
vendor/src/github.com/Shopify/sarama/join_group_response_test.go
vendored
Normal file
98
vendor/src/github.com/Shopify/sarama/join_group_response_test.go
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
joinGroupResponseNoError = []byte{
|
||||
0x00, 0x00, // No error
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
|
||||
0, 3, 'f', 'o', 'o', // Leader ID
|
||||
0, 3, 'b', 'a', 'r', // Member ID
|
||||
0, 0, 0, 0, // No member info
|
||||
}
|
||||
|
||||
joinGroupResponseWithError = []byte{
|
||||
0, 23, // Error: inconsistent group protocol
|
||||
0x00, 0x00, 0x00, 0x00, // Generation ID
|
||||
0, 0, // Protocol name chosen
|
||||
0, 0, // Leader ID
|
||||
0, 0, // Member ID
|
||||
0, 0, 0, 0, // No member info
|
||||
}
|
||||
|
||||
joinGroupResponseLeader = []byte{
|
||||
0x00, 0x00, // No error
|
||||
0x00, 0x01, 0x02, 0x03, // Generation ID
|
||||
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
|
||||
0, 3, 'f', 'o', 'o', // Leader ID
|
||||
0, 3, 'f', 'o', 'o', // Member ID == Leader ID
|
||||
0, 0, 0, 1, // 1 member
|
||||
0, 3, 'f', 'o', 'o', // Member ID
|
||||
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata
|
||||
}
|
||||
)
|
||||
|
||||
func TestJoinGroupResponse(t *testing.T) {
|
||||
var response *JoinGroupResponse
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding Err failed: no error expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 66051 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "foo" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "bar" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 0 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0)
|
||||
if response.Err != ErrInconsistentGroupProtocol {
|
||||
t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 0 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 0 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
|
||||
response = new(JoinGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, joinGroupResponseLeader, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding Err failed: ErrNoError expected but found", response.Err)
|
||||
}
|
||||
if response.GenerationId != 66051 {
|
||||
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
|
||||
}
|
||||
if response.LeaderId != "foo" {
|
||||
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
|
||||
}
|
||||
if response.MemberId != "foo" {
|
||||
t.Error("Decoding MemberId failed, found:", response.MemberId)
|
||||
}
|
||||
if len(response.Members) != 1 {
|
||||
t.Error("Decoding Members failed, found:", response.Members)
|
||||
}
|
||||
if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) {
|
||||
t.Error("Decoding foo member failed, found:", response.Members["foo"])
|
||||
}
|
||||
}
|
40
vendor/src/github.com/Shopify/sarama/leave_group_request.go
vendored
Normal file
40
vendor/src/github.com/Shopify/sarama/leave_group_request.go
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package sarama
|
||||
|
||||
type LeaveGroupRequest struct {
|
||||
GroupId string
|
||||
MemberId string
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.GroupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(r.MemberId); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
if r.GroupId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
if r.MemberId, err = pd.getString(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) key() int16 {
|
||||
return 13
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
19
vendor/src/github.com/Shopify/sarama/leave_group_request_test.go
vendored
Normal file
19
vendor/src/github.com/Shopify/sarama/leave_group_request_test.go
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
basicLeaveGroupRequest = []byte{
|
||||
0, 3, 'f', 'o', 'o',
|
||||
0, 3, 'b', 'a', 'r',
|
||||
}
|
||||
)
|
||||
|
||||
func TestLeaveGroupRequest(t *testing.T) {
|
||||
var request *LeaveGroupRequest
|
||||
|
||||
request = new(LeaveGroupRequest)
|
||||
request.GroupId = "foo"
|
||||
request.MemberId = "bar"
|
||||
testRequest(t, "basic", request, basicLeaveGroupRequest)
|
||||
}
|
32
vendor/src/github.com/Shopify/sarama/leave_group_response.go
vendored
Normal file
32
vendor/src/github.com/Shopify/sarama/leave_group_response.go
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
package sarama
|
||||
|
||||
type LeaveGroupResponse struct {
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
if kerr, err := pd.getInt16(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
r.Err = KError(kerr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) key() int16 {
|
||||
return 13
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
24
vendor/src/github.com/Shopify/sarama/leave_group_response_test.go
vendored
Normal file
24
vendor/src/github.com/Shopify/sarama/leave_group_response_test.go
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
leaveGroupResponseNoError = []byte{0x00, 0x00}
|
||||
leaveGroupResponseWithError = []byte{0, 25}
|
||||
)
|
||||
|
||||
func TestLeaveGroupResponse(t *testing.T) {
|
||||
var response *LeaveGroupResponse
|
||||
|
||||
response = new(LeaveGroupResponse)
|
||||
testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Decoding error failed: no error expected but found", response.Err)
|
||||
}
|
||||
|
||||
response = new(LeaveGroupResponse)
|
||||
testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0)
|
||||
if response.Err != ErrUnknownMemberId {
|
||||
t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err)
|
||||
}
|
||||
}
|
29
vendor/src/github.com/Shopify/sarama/length_field.go
vendored
Normal file
29
vendor/src/github.com/Shopify/sarama/length_field.go
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
package sarama
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
|
||||
type lengthField struct {
|
||||
startOffset int
|
||||
}
|
||||
|
||||
func (l *lengthField) saveOffset(in int) {
|
||||
l.startOffset = in
|
||||
}
|
||||
|
||||
func (l *lengthField) reserveLength() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
func (l *lengthField) run(curOffset int, buf []byte) error {
|
||||
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lengthField) check(curOffset int, buf []byte) error {
|
||||
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
|
||||
return PacketDecodingError{"length field invalid"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
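A standalone illustration (not library code) of the framing that lengthField implements: four bytes are reserved up front and later backfilled with the big-endian length of everything written after them:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		payload := []byte("hello")
		buf := make([]byte, 4+len(payload)) // reserve 4 bytes, as reserveLength() does
		copy(buf[4:], payload)
		// run() backfills curOffset-startOffset-4, i.e. the length of the payload
		binary.BigEndian.PutUint32(buf[:4], uint32(len(buf)-4))
		fmt.Println(buf) // [0 0 0 5 104 101 108 108 111]
	}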
24
vendor/src/github.com/Shopify/sarama/list_groups_request.go
vendored
Normal file
24
vendor/src/github.com/Shopify/sarama/list_groups_request.go
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package sarama
|
||||
|
||||
type ListGroupsRequest struct {
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) key() int16 {
|
||||
return 16
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
7
vendor/src/github.com/Shopify/sarama/list_groups_request_test.go
vendored
Normal file
7
vendor/src/github.com/Shopify/sarama/list_groups_request_test.go
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestListGroupsRequest(t *testing.T) {
|
||||
testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{})
|
||||
}
|
68
vendor/src/github.com/Shopify/sarama/list_groups_response.go
vendored
Normal file
68
vendor/src/github.com/Shopify/sarama/list_groups_response.go
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
package sarama
|
||||
|
||||
type ListGroupsResponse struct {
|
||||
Err KError
|
||||
Groups map[string]string
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.Err))
|
||||
|
||||
if err := pe.putArrayLength(len(r.Groups)); err != nil {
|
||||
return err
|
||||
}
|
||||
for groupId, protocolType := range r.Groups {
|
||||
if err := pe.putString(groupId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putString(protocolType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
|
||||
if kerr, err := pd.getInt16(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
r.Err = KError(kerr)
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Groups = make(map[string]string)
|
||||
for i := 0; i < n; i++ {
|
||||
groupId, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protocolType, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Groups[groupId] = protocolType
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) key() int16 {
|
||||
return 16
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
58
vendor/src/github.com/Shopify/sarama/list_groups_response_test.go
vendored
Normal file
58
vendor/src/github.com/Shopify/sarama/list_groups_response_test.go
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
listGroupsResponseEmpty = []byte{
|
||||
0, 0, // no error
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
listGroupsResponseError = []byte{
|
||||
0, 31, // ErrClusterAuthorizationFailed
|
||||
0, 0, 0, 0, // no groups
|
||||
}
|
||||
|
||||
listGroupsResponseWithConsumer = []byte{
|
||||
0, 0, // no error
|
||||
0, 0, 0, 1, // 1 group
|
||||
0, 3, 'f', 'o', 'o', // group name
|
||||
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type
|
||||
}
|
||||
)
|
||||
|
||||
func TestListGroupsResponse(t *testing.T) {
|
||||
var response *ListGroupsResponse
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseError, 0)
|
||||
if response.Err != ErrClusterAuthorizationFailed {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 0 {
|
||||
t.Error("Expected no groups")
|
||||
}
|
||||
|
||||
response = new(ListGroupsResponse)
|
||||
testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0)
|
||||
if response.Err != ErrNoError {
|
||||
t.Error("Expected no gerror, found:", response.Err)
|
||||
}
|
||||
if len(response.Groups) != 1 {
|
||||
t.Error("Expected one group")
|
||||
}
|
||||
if response.Groups["foo"] != "consumer" {
|
||||
t.Error("Expected foo group to use consumer protocol")
|
||||
}
|
||||
}
|
196
vendor/src/github.com/Shopify/sarama/message.go
vendored
Normal file
196
vendor/src/github.com/Shopify/sarama/message.go
vendored
Normal file
@ -0,0 +1,196 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
|
||||
type CompressionCodec int8
|
||||
|
||||
// only the last two bits are really used
|
||||
const compressionCodecMask int8 = 0x03
|
||||
|
||||
const (
|
||||
CompressionNone CompressionCodec = 0
|
||||
CompressionGZIP CompressionCodec = 1
|
||||
CompressionSnappy CompressionCodec = 2
|
||||
CompressionLZ4 CompressionCodec = 3
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
Codec CompressionCodec // codec used to compress the message contents
|
||||
Key []byte // the message key, may be nil
|
||||
Value []byte // the message contents
|
||||
Set *MessageSet // the message set a message might wrap
|
||||
Version int8 // v1 requires Kafka 0.10
|
||||
Timestamp time.Time // the timestamp of the message (version 1+ only)
|
||||
|
||||
compressedCache []byte
|
||||
compressedSize int // used for computing the compression ratio metrics
|
||||
}
|
||||
|
||||
func (m *Message) encode(pe packetEncoder) error {
|
||||
pe.push(&crc32Field{})
|
||||
|
||||
pe.putInt8(m.Version)
|
||||
|
||||
attributes := int8(m.Codec) & compressionCodecMask
|
||||
pe.putInt8(attributes)
|
||||
|
||||
if m.Version >= 1 {
|
||||
pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
|
||||
}
|
||||
|
||||
err := pe.putBytes(m.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var payload []byte
|
||||
|
||||
if m.compressedCache != nil {
|
||||
payload = m.compressedCache
|
||||
m.compressedCache = nil
|
||||
} else if m.Value != nil {
|
||||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
payload = m.Value
|
||||
case CompressionGZIP:
|
||||
var buf bytes.Buffer
|
||||
writer := gzip.NewWriter(&buf)
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
case CompressionSnappy:
|
||||
tmp := snappy.Encode(m.Value)
|
||||
m.compressedCache = tmp
|
||||
payload = m.compressedCache
|
||||
case CompressionLZ4:
|
||||
var buf bytes.Buffer
|
||||
writer := lz4.NewWriter(&buf)
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
|
||||
default:
|
||||
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
|
||||
}
|
||||
// Record the compressed payload size for metric gathering
|
||||
m.compressedSize = len(payload)
|
||||
}
|
||||
|
||||
if err = pe.putBytes(payload); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
func (m *Message) decode(pd packetDecoder) (err error) {
|
||||
err = pd.push(&crc32Field{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Version, err = pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attribute, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Codec = CompressionCodec(attribute & compressionCodecMask)
|
||||
|
||||
if m.Version >= 1 {
|
||||
millis, err := pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
|
||||
}
|
||||
|
||||
m.Key, err = pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Value, err = pd.getBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Required for deep equal assertion during tests but might be useful
|
||||
// for future metrics about the compression ratio in fetch requests
|
||||
m.compressedSize = len(m.Value)
|
||||
|
||||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
// nothing to do
|
||||
case CompressionGZIP:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionSnappy:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
if m.Value, err = snappy.Decode(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionLZ4:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader := lz4.NewReader(bytes.NewReader(m.Value))
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
|
||||
}
|
||||
|
||||
return pd.pop()
|
||||
}
|
||||
|
||||
// decodes a message set from a previously encoded bulk-message
|
||||
func (m *Message) decodeSet() (err error) {
|
||||
pd := realDecoder{raw: m.Value}
|
||||
m.Set = &MessageSet{}
|
||||
return m.Set.decode(&pd)
|
||||
}
|
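For clarity, a small sketch of the attribute-byte handling used by encode and decode above: only the low two bits of the attributes byte carry the compression codec, so both directions mask with compressionCodecMask (0x03):

	attributes := int8(CompressionSnappy) & compressionCodecMask // 0x02 on the wire
	codec := CompressionCodec(attributes & compressionCodecMask) // CompressionSnappy again
	_ = codec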
89
vendor/src/github.com/Shopify/sarama/message_set.go
vendored
Normal file
89
vendor/src/github.com/Shopify/sarama/message_set.go
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package sarama
|
||||
|
||||
type MessageBlock struct {
|
||||
Offset int64
|
||||
Msg *Message
|
||||
}
|
||||
|
||||
// Messages is a convenience helper which returns either all the messages
// wrapped in this block, or the block itself if it does not wrap a nested set
|
||||
func (msb *MessageBlock) Messages() []*MessageBlock {
|
||||
if msb.Msg.Set != nil {
|
||||
return msb.Msg.Set.Messages
|
||||
}
|
||||
return []*MessageBlock{msb}
|
||||
}
|
||||
|
||||
func (msb *MessageBlock) encode(pe packetEncoder) error {
|
||||
pe.putInt64(msb.Offset)
|
||||
pe.push(&lengthField{})
|
||||
err := msb.Msg.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pe.pop()
|
||||
}
|
||||
|
||||
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
|
||||
if msb.Offset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.push(&lengthField{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msb.Msg = new(Message)
|
||||
if err = msb.Msg.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pd.pop(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type MessageSet struct {
|
||||
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
|
||||
Messages []*MessageBlock
|
||||
}
|
||||
|
||||
func (ms *MessageSet) encode(pe packetEncoder) error {
|
||||
for i := range ms.Messages {
|
||||
err := ms.Messages[i].encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
|
||||
ms.Messages = nil
|
||||
|
||||
for pd.remaining() > 0 {
|
||||
msb := new(MessageBlock)
|
||||
err = msb.decode(pd)
|
||||
switch err {
|
||||
case nil:
|
||||
ms.Messages = append(ms.Messages, msb)
|
||||
case ErrInsufficientData:
|
||||
// As an optimization the server is allowed to return a partial message at the
|
||||
// end of the message set. Clients should handle this case. So we just ignore such things.
|
||||
ms.PartialTrailingMessage = true
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) addMessage(msg *Message) {
|
||||
block := new(MessageBlock)
|
||||
block.Msg = msg
|
||||
ms.Messages = append(ms.Messages, block)
|
||||
}
|
165
vendor/src/github.com/Shopify/sarama/message_test.go
vendored
Normal file
165
vendor/src/github.com/Shopify/sarama/message_test.go
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyMessage = []byte{
|
||||
167, 236, 104, 3, // CRC
|
||||
0x00, // magic version byte
|
||||
0x00, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0xFF, 0xFF, 0xFF, 0xFF} // value
|
||||
|
||||
emptyGzipMessage = []byte{
|
||||
97, 79, 149, 90, //CRC
|
||||
0x00, // magic version byte
|
||||
0x01, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
// value
|
||||
0x00, 0x00, 0x00, 0x17,
|
||||
0x1f, 0x8b,
|
||||
0x08,
|
||||
0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
emptyLZ4Message = []byte{
|
||||
132, 219, 238, 101, // CRC
|
||||
0x01, // version byte
|
||||
0x03, // attribute flags: lz4
|
||||
0, 0, 1, 88, 141, 205, 89, 56, // timestamp
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x0f, // len
|
||||
0x04, 0x22, 0x4D, 0x18, // LZ4 magic number
|
||||
100, // LZ4 flags: version 01, block independent, content checksum
|
||||
112, 185, 0, 0, 0, 0, // LZ4 data
|
||||
5, 93, 204, 2, // LZ4 checksum
|
||||
}
|
||||
|
||||
emptyBulkSnappyMessage = []byte{
|
||||
180, 47, 53, 209, //CRC
|
||||
0x00, // magic version byte
|
||||
0x02, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0, 0, 0, 42,
|
||||
130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
|
||||
0, 0, 0, 1, // min version
|
||||
0, 0, 0, 1, // default version
|
||||
0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
|
||||
|
||||
emptyBulkGzipMessage = []byte{
|
||||
139, 160, 63, 141, //CRC
|
||||
0x00, // magic version byte
|
||||
0x01, // attribute flags
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x27, // len
|
||||
0x1f, 0x8b, // Gzip Magic
|
||||
0x08, // deflate compressed
|
||||
0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
|
||||
|
||||
emptyBulkLZ4Message = []byte{
|
||||
246, 12, 188, 129, // CRC
|
||||
0x01, // Version
|
||||
0x03, // attribute flags (LZ4)
|
||||
255, 255, 249, 209, 212, 181, 73, 201, // timestamp
|
||||
0xFF, 0xFF, 0xFF, 0xFF, // key
|
||||
0x00, 0x00, 0x00, 0x47, // len
|
||||
0x04, 0x22, 0x4D, 0x18, // magic number lz4
|
||||
100, // lz4 flags 01100100
|
||||
// version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00
|
||||
112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
71, 129, 23, 111, // LZ4 checksum
|
||||
}
|
||||
)
|
||||
|
||||
func TestMessageEncoding(t *testing.T) {
|
||||
message := Message{}
|
||||
testEncodable(t, "empty", &message, emptyMessage)
|
||||
|
||||
message.Value = []byte{}
|
||||
message.Codec = CompressionGZIP
|
||||
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
|
||||
|
||||
message.Value = []byte{}
|
||||
message.Codec = CompressionLZ4
|
||||
message.Timestamp = time.Unix(1479847795, 0)
|
||||
message.Version = 1
|
||||
testEncodable(t, "empty lz4", &message, emptyLZ4Message)
|
||||
}
|
||||
|
||||
func TestMessageDecoding(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "empty", &message, emptyMessage)
|
||||
if message.Codec != CompressionNone {
|
||||
t.Error("Decoding produced compression codec where there was none.")
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Error("Decoding produced key where there was none.")
|
||||
}
|
||||
if message.Value != nil {
|
||||
t.Error("Decoding produced value where there was none.")
|
||||
}
|
||||
if message.Set != nil {
|
||||
t.Error("Decoding produced set where there was none.")
|
||||
}
|
||||
|
||||
testDecodable(t, "empty gzip", &message, emptyGzipMessage)
|
||||
if message.Codec != CompressionGZIP {
|
||||
t.Error("Decoding produced incorrect compression codec (was gzip).")
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Error("Decoding produced key where there was none.")
|
||||
}
|
||||
if message.Value == nil || len(message.Value) != 0 {
|
||||
t.Error("Decoding produced nil or content-ful value where there was an empty array.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkSnappy(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
|
||||
if message.Codec != CompressionSnappy {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkGzip(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
|
||||
if message.Codec != CompressionGZIP {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMessageDecodingBulkLZ4(t *testing.T) {
|
||||
message := Message{}
|
||||
testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message)
|
||||
if message.Codec != CompressionLZ4 {
|
||||
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4)
|
||||
}
|
||||
if message.Key != nil {
|
||||
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
|
||||
}
|
||||
if message.Set == nil {
|
||||
t.Error("Decoding produced no set, but one was expected.")
|
||||
} else if len(message.Set.Messages) != 2 {
|
||||
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
|
||||
}
|
||||
}
|
52
vendor/src/github.com/Shopify/sarama/metadata_request.go
vendored
Normal file
52
vendor/src/github.com/Shopify/sarama/metadata_request.go
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package sarama
|
||||
|
||||
type MetadataRequest struct {
|
||||
Topics []string
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) encode(pe packetEncoder) error {
|
||||
err := pe.putArrayLength(len(r.Topics))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range r.Topics {
|
||||
err = pe.putString(r.Topics[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Topics = make([]string, topicCount)
|
||||
for i := range r.Topics {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Topics[i] = topic
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) key() int16 {
|
||||
return 3
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
29
vendor/src/github.com/Shopify/sarama/metadata_request_test.go
vendored
Normal file
29
vendor/src/github.com/Shopify/sarama/metadata_request_test.go
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
metadataRequestNoTopics = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
metadataRequestOneTopic = []byte{
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
|
||||
|
||||
metadataRequestThreeTopics = []byte{
|
||||
0x00, 0x00, 0x00, 0x03,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x03, 'b', 'a', 'z'}
|
||||
)
|
||||
|
||||
func TestMetadataRequest(t *testing.T) {
|
||||
request := new(MetadataRequest)
|
||||
testRequest(t, "no topics", request, metadataRequestNoTopics)
|
||||
|
||||
request.Topics = []string{"topic1"}
|
||||
testRequest(t, "one topic", request, metadataRequestOneTopic)
|
||||
|
||||
request.Topics = []string{"foo", "bar", "baz"}
|
||||
testRequest(t, "three topics", request, metadataRequestThreeTopics)
|
||||
}
|
239
vendor/src/github.com/Shopify/sarama/metadata_response.go
vendored
Normal file
239
vendor/src/github.com/Shopify/sarama/metadata_response.go
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
package sarama
|
||||
|
||||
type PartitionMetadata struct {
|
||||
Err KError
|
||||
ID int32
|
||||
Leader int32
|
||||
Replicas []int32
|
||||
Isr []int32
|
||||
}
|
||||
|
||||
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.Err = KError(tmp)
|
||||
|
||||
pm.ID, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Leader, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Replicas, err = pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.Isr, err = pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(pm.Err))
|
||||
pe.putInt32(pm.ID)
|
||||
pe.putInt32(pm.Leader)
|
||||
|
||||
err = pe.putInt32Array(pm.Replicas)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putInt32Array(pm.Isr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type TopicMetadata struct {
|
||||
Err KError
|
||||
Name string
|
||||
Partitions []*PartitionMetadata
|
||||
}
|
||||
|
||||
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tm.Err = KError(tmp)
|
||||
|
||||
tm.Name, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tm.Partitions = make([]*PartitionMetadata, n)
|
||||
for i := 0; i < n; i++ {
|
||||
tm.Partitions[i] = new(PartitionMetadata)
|
||||
err = tm.Partitions[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt16(int16(tm.Err))
|
||||
|
||||
err = pe.putString(tm.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(tm.Partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pm := range tm.Partitions {
|
||||
err = pm.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type MetadataResponse struct {
|
||||
Brokers []*Broker
|
||||
Topics []*TopicMetadata
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Brokers = make([]*Broker, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Brokers[i] = new(Broker)
|
||||
err = r.Brokers[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
n, err = pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Topics = make([]*TopicMetadata, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r.Topics[i] = new(TopicMetadata)
|
||||
err = r.Topics[i].decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) encode(pe packetEncoder) error {
|
||||
err := pe.putArrayLength(len(r.Brokers))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, broker := range r.Brokers {
|
||||
err = broker.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(r.Topics))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, tm := range r.Topics {
|
||||
err = tm.encode(pe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) key() int16 {
|
||||
return 3
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
||||
|
||||
// testing API
|
||||
|
||||
func (r *MetadataResponse) AddBroker(addr string, id int32) {
|
||||
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
|
||||
var tmatch *TopicMetadata
|
||||
|
||||
for _, tm := range r.Topics {
|
||||
if tm.Name == topic {
|
||||
tmatch = tm
|
||||
goto foundTopic
|
||||
}
|
||||
}
|
||||
|
||||
tmatch = new(TopicMetadata)
|
||||
tmatch.Name = topic
|
||||
r.Topics = append(r.Topics, tmatch)
|
||||
|
||||
foundTopic:
|
||||
|
||||
tmatch.Err = err
|
||||
return tmatch
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
|
||||
tmatch := r.AddTopic(topic, ErrNoError)
|
||||
var pmatch *PartitionMetadata
|
||||
|
||||
for _, pm := range tmatch.Partitions {
|
||||
if pm.ID == partition {
|
||||
pmatch = pm
|
||||
goto foundPartition
|
||||
}
|
||||
}
|
||||
|
||||
pmatch = new(PartitionMetadata)
|
||||
pmatch.ID = partition
|
||||
tmatch.Partitions = append(tmatch.Partitions, pmatch)
|
||||
|
||||
foundPartition:
|
||||
|
||||
pmatch.Leader = brokerID
|
||||
pmatch.Replicas = replicas
|
||||
pmatch.Isr = isr
|
||||
pmatch.Err = err
|
||||
|
||||
}
|
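A short sketch of the testing API above; the broker address, topic and partition values are made up:

	resp := new(MetadataResponse)
	resp.AddBroker("localhost:9092", 0)
	resp.AddTopicPartition("my-topic", 0, 0, []int32{0, 1}, []int32{0}, ErrNoError)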
139
vendor/src/github.com/Shopify/sarama/metadata_response_test.go
vendored
Normal file
139
vendor/src/github.com/Shopify/sarama/metadata_response_test.go
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
brokersNoTopicsMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x00, 0xab, 0xff,
|
||||
0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
|
||||
0x00, 0x00, 0x00, 0x33,
|
||||
|
||||
0x00, 0x01, 0x02, 0x03,
|
||||
0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
|
||||
0x00, 0x00, 0x01, 0x11,
|
||||
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
topicsNoBrokersMetadataResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x00,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x07,
|
||||
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x00,
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestEmptyMetadataResponse(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0)
|
||||
if len(response.Brokers) != 0 {
|
||||
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
|
||||
}
|
||||
if len(response.Topics) != 0 {
|
||||
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataResponseWithBrokers(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse, 0)
|
||||
if len(response.Brokers) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
|
||||
}
|
||||
|
||||
if response.Brokers[0].id != 0xabff {
|
||||
t.Error("Decoding produced invalid broker 0 id.")
|
||||
}
|
||||
if response.Brokers[0].addr != "localhost:51" {
|
||||
t.Error("Decoding produced invalid broker 0 address.")
|
||||
}
|
||||
if response.Brokers[1].id != 0x010203 {
|
||||
t.Error("Decoding produced invalid broker 1 id.")
|
||||
}
|
||||
if response.Brokers[1].addr != "google.com:273" {
|
||||
t.Error("Decoding produced invalid broker 1 address.")
|
||||
}
|
||||
|
||||
if len(response.Topics) != 0 {
|
||||
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadataResponseWithTopics(t *testing.T) {
|
||||
response := MetadataResponse{}
|
||||
|
||||
testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0)
|
||||
if len(response.Brokers) != 0 {
|
||||
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
|
||||
}
|
||||
|
||||
if len(response.Topics) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
|
||||
}
|
||||
|
||||
if response.Topics[0].Err != ErrNoError {
|
||||
t.Error("Decoding produced invalid topic 0 error.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Name != "foo" {
|
||||
t.Error("Decoding produced invalid topic 0 name.")
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions) != 1 {
|
||||
t.Fatal("Decoding produced invalid partition count for topic 0.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 error.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].ID != 0x01 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 id.")
|
||||
}
|
||||
|
||||
if response.Topics[0].Partitions[0].Leader != 0x07 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 leader.")
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions[0].Replicas) != 3 {
|
||||
t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
|
||||
}
|
||||
}
|
||||
|
||||
if len(response.Topics[0].Partitions[0].Isr) != 0 {
|
||||
t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
|
||||
}
|
||||
|
||||
if response.Topics[1].Err != ErrNoError {
|
||||
t.Error("Decoding produced invalid topic 1 error.")
|
||||
}
|
||||
|
||||
if response.Topics[1].Name != "bar" {
|
||||
t.Error("Decoding produced invalid topic 0 name.")
|
||||
}
|
||||
|
||||
if len(response.Topics[1].Partitions) != 0 {
|
||||
t.Error("Decoding produced invalid partition count for topic 1.")
|
||||
}
|
||||
}
|
51
vendor/src/github.com/Shopify/sarama/metrics.go
vendored
Normal file
51
vendor/src/github.com/Shopify/sarama/metrics.go
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
|
||||
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
|
||||
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
|
||||
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
|
||||
const (
|
||||
metricsReservoirSize = 1028
|
||||
metricsAlphaFactor = 0.015
|
||||
)
|
||||
|
||||
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
|
||||
return r.GetOrRegister(name, func() metrics.Histogram {
|
||||
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
|
||||
}).(metrics.Histogram)
|
||||
}
|
||||
|
||||
func getMetricNameForBroker(name string, broker *Broker) string {
|
||||
// Use broker id like the Java client as it does not contain '.' or ':' characters that
|
||||
// can be interpreted as special characters by monitoring tools (e.g. Graphite)
|
||||
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
|
||||
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
|
||||
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getMetricNameForTopic(name string, topic string) string {
|
||||
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
|
||||
// cf. KAFKA-1902 and KAFKA-2337
|
||||
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
|
||||
}
|
||||
|
||||
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
|
||||
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
|
||||
}
|
||||
|
||||
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
|
||||
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
|
||||
}
|
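A small illustrative sketch of the (unexported) naming helpers above; the metric and topic names are hypothetical:

	brokerMetric := getMetricNameForBroker("request-rate", &Broker{id: 1})
	// brokerMetric == "request-rate-for-broker-1"
	topicMetric := getMetricNameForTopic("record-send-rate", "my.topic")
	// topicMetric == "record-send-rate-for-topic-my_topic" (dots become underscores)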
172
vendor/src/github.com/Shopify/sarama/metrics_test.go
vendored
Normal file
172
vendor/src/github.com/Shopify/sarama/metrics_test.go
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
func TestGetOrRegisterHistogram(t *testing.T) {
|
||||
metricRegistry := metrics.NewRegistry()
|
||||
histogram := getOrRegisterHistogram("name", metricRegistry)
|
||||
|
||||
if histogram == nil {
|
||||
t.Error("Unexpected nil histogram")
|
||||
}
|
||||
|
||||
// Fetch the metric
|
||||
foundHistogram := metricRegistry.Get("name")
|
||||
|
||||
if foundHistogram != histogram {
|
||||
t.Error("Unexpected different histogram", foundHistogram, histogram)
|
||||
}
|
||||
|
||||
// Try to register the metric again
|
||||
sameHistogram := getOrRegisterHistogram("name", metricRegistry)
|
||||
|
||||
if sameHistogram != histogram {
|
||||
t.Error("Unexpected different histogram", sameHistogram, histogram)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMetricNameForBroker(t *testing.T) {
|
||||
metricName := getMetricNameForBroker("name", &Broker{id: 1})
|
||||
|
||||
if metricName != "name-for-broker-1" {
|
||||
t.Error("Unexpected metric name", metricName)
|
||||
}
|
||||
}
|
||||
|
||||
// Common type and functions for metric validation
|
||||
type metricValidator struct {
|
||||
name string
|
||||
validator func(*testing.T, interface{})
|
||||
}
|
||||
|
||||
type metricValidators []*metricValidator
|
||||
|
||||
func newMetricValidators() metricValidators {
|
||||
return make([]*metricValidator, 0, 32)
|
||||
}
|
||||
|
||||
func (m *metricValidators) register(validator *metricValidator) {
|
||||
*m = append(*m, validator)
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForBroker(broker *Broker, validator *metricValidator) {
|
||||
m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator})
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) {
|
||||
m.register(&metricValidator{validator.name, validator.validator})
|
||||
m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator})
|
||||
}
|
||||
|
||||
func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) {
|
||||
m.register(validator)
|
||||
m.registerForBroker(broker, validator)
|
||||
}
|
||||
|
||||
func (m metricValidators) run(t *testing.T, r metrics.Registry) {
|
||||
for _, metricValidator := range m {
|
||||
metric := r.Get(metricValidator.name)
|
||||
if metric == nil {
|
||||
t.Error("No metric named", metricValidator.name)
|
||||
} else {
|
||||
metricValidator.validator(t, metric)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator {
|
||||
return &metricValidator{
|
||||
name: name,
|
||||
validator: func(t *testing.T, metric interface{}) {
|
||||
if meter, ok := metric.(metrics.Meter); !ok {
|
||||
t.Errorf("Expected meter metric for '%s', got %T", name, metric)
|
||||
} else {
|
||||
extraValidator(t, meter)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func countMeterValidator(name string, expectedCount int) *metricValidator {
|
||||
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
|
||||
count := meter.Count()
|
||||
if count != int64(expectedCount) {
|
||||
t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minCountMeterValidator(name string, minCount int) *metricValidator {
|
||||
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
|
||||
count := meter.Count()
|
||||
if count < int64(minCount) {
|
||||
t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator {
|
||||
return &metricValidator{
|
||||
name: name,
|
||||
validator: func(t *testing.T, metric interface{}) {
|
||||
if histogram, ok := metric.(metrics.Histogram); !ok {
|
||||
t.Errorf("Expected histogram metric for '%s', got %T", name, metric)
|
||||
} else {
|
||||
extraValidator(t, histogram)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func countHistogramValidator(name string, expectedCount int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
count := histogram.Count()
|
||||
if count != int64(expectedCount) {
|
||||
t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minCountHistogramValidator(name string, minCount int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
count := histogram.Count()
|
||||
if count < int64(minCount) {
|
||||
t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
min := int(histogram.Min())
|
||||
if min != expectedMin {
|
||||
t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min)
|
||||
}
|
||||
max := int(histogram.Max())
|
||||
if max != expectedMax {
|
||||
t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func minValHistogramValidator(name string, minMin int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
min := int(histogram.Min())
|
||||
if min < minMin {
|
||||
t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func maxValHistogramValidator(name string, maxMax int) *metricValidator {
|
||||
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
|
||||
max := int(histogram.Max())
|
||||
if max > maxMax {
|
||||
t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max)
|
||||
}
|
||||
})
|
||||
}
|
324
vendor/src/github.com/Shopify/sarama/mockbroker.go
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
const (
|
||||
expectationTimeout = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
type requestHandlerFunc func(req *request) (res encoder)
|
||||
|
||||
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
|
||||
// and provides the number of bytes read and written.
|
||||
type RequestNotifierFunc func(bytesRead, bytesWritten int)
|
||||
|
||||
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
|
||||
// to facilitate testing of higher level or specialized consumers and producers
|
||||
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
|
||||
// but rather provides a facility to do that. It takes care of the TCP
|
||||
// transport, request unmarshaling, response marshaling, and makes it the test
|
||||
// writer's responsibility to program MockBroker behaviour that is correct
// according to the Kafka API protocol.
|
||||
//
|
||||
// MockBroker is implemented as a TCP server listening on a kernel-selected
|
||||
// localhost port that can accept many connections. It reads Kafka requests
|
||||
// from that connection and returns responses programmed by the SetHandlerByMap
|
||||
// function. If a MockBroker receives a request that it has no programmed
|
||||
// response for, then it returns nothing and the request times out.
|
||||
//
|
||||
// A set of MockRequest builders to define mappings used by MockBroker is
|
||||
// provided by Sarama. But users can develop MockRequests of their own and use
|
||||
// them along with or instead of the standard ones.
|
||||
//
|
||||
// When running tests with MockBroker it is strongly recommended to specify
|
||||
// a timeout to `go test` so that if the broker hangs waiting for a response,
|
||||
// the test panics.
|
||||
//
|
||||
// It is not necessary to prefix message length or correlation ID to your
|
||||
// response bytes, the server does that automatically as a convenience.
|
||||
type MockBroker struct {
|
||||
brokerID int32
|
||||
port int32
|
||||
closing chan none
|
||||
stopper chan none
|
||||
expectations chan encoder
|
||||
listener net.Listener
|
||||
t TestReporter
|
||||
latency time.Duration
|
||||
handler requestHandlerFunc
|
||||
notifier RequestNotifierFunc
|
||||
history []RequestResponse
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// RequestResponse represents a Request/Response pair processed by MockBroker.
|
||||
type RequestResponse struct {
|
||||
Request protocolBody
|
||||
Response encoder
|
||||
}
|
||||
|
||||
// SetLatency makes broker pause for the specified period every time before
|
||||
// replying.
|
||||
func (b *MockBroker) SetLatency(latency time.Duration) {
|
||||
b.latency = latency
|
||||
}
|
||||
|
||||
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
|
||||
// request is received by the broker, it looks up the request type in the map
|
||||
// and uses the found MockResponse instance to generate an appropriate reply.
|
||||
// If the request type is not found in the map then nothing is sent.
|
||||
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
|
||||
b.setHandler(func(req *request) (res encoder) {
|
||||
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
|
||||
mockResponse := handlerMap[reqTypeName]
|
||||
if mockResponse == nil {
|
||||
return nil
|
||||
}
|
||||
return mockResponse.For(req.body)
|
||||
})
|
||||
}
|
||||
|
||||
// SetNotifier sets a function that will get invoked whenever a request has been
// processed successfully and will provide the number of bytes read and written.
|
||||
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
|
||||
b.lock.Lock()
|
||||
b.notifier = notifier
|
||||
b.lock.Unlock()
|
||||
}
|
||||
|
||||
// BrokerID returns broker ID assigned to the broker.
|
||||
func (b *MockBroker) BrokerID() int32 {
|
||||
return b.brokerID
|
||||
}
|
||||
|
||||
// History returns a slice of RequestResponse pairs in the order they were
|
||||
// processed by the broker. Note that in case of multiple connections to the
|
||||
// broker the order expected by a test can be different from the order recorded
|
||||
// in the history, unless some synchronization is implemented in the test.
|
||||
func (b *MockBroker) History() []RequestResponse {
|
||||
b.lock.Lock()
|
||||
history := make([]RequestResponse, len(b.history))
|
||||
copy(history, b.history)
|
||||
b.lock.Unlock()
|
||||
return history
|
||||
}
|
||||
|
||||
// Port returns the TCP port number the broker is listening for requests on.
|
||||
func (b *MockBroker) Port() int32 {
|
||||
return b.port
|
||||
}
|
||||
|
||||
// Addr returns the broker connection string in the form "<address>:<port>".
|
||||
func (b *MockBroker) Addr() string {
|
||||
return b.listener.Addr().String()
|
||||
}
|
||||
|
||||
// Close terminates the broker blocking until it stops internal goroutines and
|
||||
// releases all resources.
|
||||
func (b *MockBroker) Close() {
|
||||
close(b.expectations)
|
||||
if len(b.expectations) > 0 {
|
||||
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
|
||||
for e := range b.expectations {
|
||||
_, _ = buf.WriteString(spew.Sdump(e))
|
||||
}
|
||||
b.t.Error(buf.String())
|
||||
}
|
||||
close(b.closing)
|
||||
<-b.stopper
|
||||
}
|
||||
|
||||
// setHandler sets the specified function as the request handler. Whenever
|
||||
// a mock broker reads a request from the wire it passes the request to the
|
||||
// function and sends back whatever the handler function returns.
|
||||
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
|
||||
b.lock.Lock()
|
||||
b.handler = handler
|
||||
b.lock.Unlock()
|
||||
}
|
||||
|
||||
func (b *MockBroker) serverLoop() {
|
||||
defer close(b.stopper)
|
||||
var err error
|
||||
var conn net.Conn
|
||||
|
||||
go func() {
|
||||
<-b.closing
|
||||
err := b.listener.Close()
|
||||
if err != nil {
|
||||
b.t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
i := 0
|
||||
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
|
||||
wg.Add(1)
|
||||
go b.handleRequests(conn, i, wg)
|
||||
i++
|
||||
}
|
||||
wg.Wait()
|
||||
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
_ = conn.Close()
|
||||
}()
|
||||
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
|
||||
var err error
|
||||
|
||||
abort := make(chan none)
|
||||
defer close(abort)
|
||||
go func() {
|
||||
select {
|
||||
case <-b.closing:
|
||||
_ = conn.Close()
|
||||
case <-abort:
|
||||
}
|
||||
}()
|
||||
|
||||
resHeader := make([]byte, 8)
|
||||
for {
|
||||
req, bytesRead, err := decodeRequest(conn)
|
||||
if err != nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
|
||||
if b.latency > 0 {
|
||||
time.Sleep(b.latency)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
res := b.handler(req)
|
||||
b.history = append(b.history, RequestResponse{req.body, res})
|
||||
b.lock.Unlock()
|
||||
|
||||
if res == nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
|
||||
continue
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
|
||||
|
||||
encodedRes, err := encode(res, nil)
|
||||
if err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if len(encodedRes) == 0 {
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, 0)
|
||||
}
|
||||
b.lock.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
|
||||
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
|
||||
if _, err = conn.Write(resHeader); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if _, err = conn.Write(encodedRes); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
|
||||
}
|
||||
b.lock.Unlock()
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
|
||||
select {
|
||||
case res, ok := <-b.expectations:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return res
|
||||
case <-time.After(expectationTimeout):
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (b *MockBroker) serverError(err error) {
|
||||
isConnectionClosedError := false
|
||||
if _, ok := err.(*net.OpError); ok {
|
||||
isConnectionClosedError = true
|
||||
} else if err == io.EOF {
|
||||
isConnectionClosedError = true
|
||||
} else if err.Error() == "use of closed network connection" {
|
||||
isConnectionClosedError = true
|
||||
}
|
||||
|
||||
if isConnectionClosedError {
|
||||
return
|
||||
}
|
||||
|
||||
b.t.Errorf(err.Error())
|
||||
}
|
||||
|
||||
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
|
||||
// test framework and a broker ID. If an error occurs it is
|
||||
// simply logged to the TestReporter and the broker exits.
|
||||
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
|
||||
return NewMockBrokerAddr(t, brokerID, "localhost:0")
|
||||
}
|
||||
|
||||
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
|
||||
// it rather than just some ephemeral port.
|
||||
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
|
||||
var err error
|
||||
|
||||
broker := &MockBroker{
|
||||
closing: make(chan none),
|
||||
stopper: make(chan none),
|
||||
t: t,
|
||||
brokerID: brokerID,
|
||||
expectations: make(chan encoder, 512),
|
||||
}
|
||||
broker.handler = broker.defaultRequestHandler
|
||||
|
||||
broker.listener, err = net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
|
||||
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp, err := strconv.ParseInt(portStr, 10, 32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
broker.port = int32(tmp)
|
||||
|
||||
go broker.serverLoop()
|
||||
|
||||
return broker
|
||||
}
|
||||
|
||||
func (b *MockBroker) Returns(e encoder) {
|
||||
b.expectations <- e
|
||||
}
|
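The simplest way to program the MockBroker defined above is the Returns queue: each queued encoder is handed out, in order, as the reply to whatever request arrives next, and Close verifies that the queue was drained. Below is a minimal sketch of that pattern from inside the sarama package; NewClient is assumed from the rest of the library, and the single-broker metadata is illustrative rather than taken from this file.

// Minimal sketch (would live in a _test.go file inside package sarama).
package sarama

import "testing"

func TestClientAgainstMockBroker(t *testing.T) {
	seedBroker := NewMockBroker(t, 1) // listens on an ephemeral localhost port
	defer seedBroker.Close()          // fails the test if queued expectations were not consumed

	// Queue a metadata response that advertises the mock broker itself.
	metadata := new(MetadataResponse)
	metadata.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
	seedBroker.Returns(metadata)

	// NewClient (assumed from the rest of the library) performs an initial
	// metadata refresh, which consumes the queued response.
	client, err := NewClient([]string{seedBroker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	_ = client.Close()
}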
455
vendor/src/github.com/Shopify/sarama/mockresponses.go
vendored
Normal file
@ -0,0 +1,455 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// TestReporter has methods matching go's testing.T to avoid importing
|
||||
// `testing` in the main part of the library.
|
||||
type TestReporter interface {
|
||||
Error(...interface{})
|
||||
Errorf(string, ...interface{})
|
||||
Fatal(...interface{})
|
||||
Fatalf(string, ...interface{})
|
||||
}
|
||||
|
||||
// MockResponse is a response builder interface; it defines one method that
|
||||
// allows generating a response based on a request body. MockResponses are used
|
||||
// to program behavior of MockBroker in tests.
|
||||
type MockResponse interface {
|
||||
For(reqBody versionedDecoder) (res encoder)
|
||||
}
|
||||
|
||||
// MockWrapper is a mock response builder that returns a particular concrete
|
||||
// response regardless of the actual request passed to the `For` method.
|
||||
type MockWrapper struct {
|
||||
res encoder
|
||||
}
|
||||
|
||||
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
|
||||
return mw.res
|
||||
}
|
||||
|
||||
func NewMockWrapper(res encoder) *MockWrapper {
|
||||
return &MockWrapper{res: res}
|
||||
}
|
||||
|
||||
// MockSequence is a mock response builder that is created from a sequence of
|
||||
// concrete responses. Every time a `MockBroker` calls its `For` method
|
||||
// the next response from the sequence is returned. When the end of the
|
||||
// sequence is reached the last element from the sequence is returned.
|
||||
type MockSequence struct {
|
||||
responses []MockResponse
|
||||
}
|
||||
|
||||
func NewMockSequence(responses ...interface{}) *MockSequence {
|
||||
ms := &MockSequence{}
|
||||
ms.responses = make([]MockResponse, len(responses))
|
||||
for i, res := range responses {
|
||||
switch res := res.(type) {
|
||||
case MockResponse:
|
||||
ms.responses[i] = res
|
||||
case encoder:
|
||||
ms.responses[i] = NewMockWrapper(res)
|
||||
default:
|
||||
panic(fmt.Sprintf("Unexpected response type: %T", res))
|
||||
}
|
||||
}
|
||||
return ms
|
||||
}
|
||||
|
||||
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
|
||||
res = mc.responses[0].For(reqBody)
|
||||
if len(mc.responses) > 1 {
|
||||
mc.responses = mc.responses[1:]
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// MockMetadataResponse is a `MetadataResponse` builder.
|
||||
type MockMetadataResponse struct {
|
||||
leaders map[string]map[int32]int32
|
||||
brokers map[string]int32
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
|
||||
return &MockMetadataResponse{
|
||||
leaders: make(map[string]map[int32]int32),
|
||||
brokers: make(map[string]int32),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
|
||||
partitions := mmr.leaders[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]int32)
|
||||
mmr.leaders[topic] = partitions
|
||||
}
|
||||
partitions[partition] = brokerID
|
||||
return mmr
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
|
||||
mmr.brokers[addr] = brokerID
|
||||
return mmr
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
metadataRequest := reqBody.(*MetadataRequest)
|
||||
metadataResponse := &MetadataResponse{}
|
||||
for addr, brokerID := range mmr.brokers {
|
||||
metadataResponse.AddBroker(addr, brokerID)
|
||||
}
|
||||
if len(metadataRequest.Topics) == 0 {
|
||||
for topic, partitions := range mmr.leaders {
|
||||
for partition, brokerID := range partitions {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
}
|
||||
for _, topic := range metadataRequest.Topics {
|
||||
for partition, brokerID := range mmr.leaders[topic] {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
}
|
||||
|
||||
// MockOffsetResponse is an `OffsetResponse` builder.
|
||||
type MockOffsetResponse struct {
|
||||
offsets map[string]map[int32]map[int64]int64
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
|
||||
return &MockOffsetResponse{
|
||||
offsets: make(map[string]map[int32]map[int64]int64),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
|
||||
partitions := mor.offsets[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]map[int64]int64)
|
||||
mor.offsets[topic] = partitions
|
||||
}
|
||||
times := partitions[partition]
|
||||
if times == nil {
|
||||
times = make(map[int64]int64)
|
||||
partitions[partition] = times
|
||||
}
|
||||
times[time] = offset
|
||||
return mor
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
|
||||
offsetRequest := reqBody.(*OffsetRequest)
|
||||
offsetResponse := &OffsetResponse{}
|
||||
for topic, partitions := range offsetRequest.blocks {
|
||||
for partition, block := range partitions {
|
||||
offset := mor.getOffset(topic, partition, block.time)
|
||||
offsetResponse.AddTopicPartition(topic, partition, offset)
|
||||
}
|
||||
}
|
||||
return offsetResponse
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
|
||||
partitions := mor.offsets[topic]
|
||||
if partitions == nil {
|
||||
mor.t.Errorf("missing topic: %s", topic)
|
||||
}
|
||||
times := partitions[partition]
|
||||
if times == nil {
|
||||
mor.t.Errorf("missing partition: %d", partition)
|
||||
}
|
||||
offset, ok := times[time]
|
||||
if !ok {
|
||||
mor.t.Errorf("missing time: %d", time)
|
||||
}
|
||||
return offset
|
||||
}
|
||||
|
||||
// MockFetchResponse is a `FetchResponse` builder.
|
||||
type MockFetchResponse struct {
|
||||
messages map[string]map[int32]map[int64]Encoder
|
||||
highWaterMarks map[string]map[int32]int64
|
||||
t TestReporter
|
||||
batchSize int
|
||||
}
|
||||
|
||||
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
|
||||
return &MockFetchResponse{
|
||||
messages: make(map[string]map[int32]map[int64]Encoder),
|
||||
highWaterMarks: make(map[string]map[int32]int64),
|
||||
t: t,
|
||||
batchSize: batchSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]map[int64]Encoder)
|
||||
mfr.messages[topic] = partitions
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
messages = make(map[int64]Encoder)
|
||||
partitions[partition] = messages
|
||||
}
|
||||
messages[offset] = msg
|
||||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
|
||||
partitions := mfr.highWaterMarks[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]int64)
|
||||
mfr.highWaterMarks[topic] = partitions
|
||||
}
|
||||
partitions[partition] = offset
|
||||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
fetchRequest := reqBody.(*FetchRequest)
|
||||
res := &FetchResponse{}
|
||||
for topic, partitions := range fetchRequest.blocks {
|
||||
for partition, block := range partitions {
|
||||
initialOffset := block.fetchOffset
|
||||
offset := initialOffset
|
||||
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
|
||||
for i := 0; i < mfr.batchSize && offset < maxOffset; {
|
||||
msg := mfr.getMessage(topic, partition, offset)
|
||||
if msg != nil {
|
||||
res.AddMessage(topic, partition, nil, msg, offset)
|
||||
i++
|
||||
}
|
||||
offset++
|
||||
}
|
||||
fb := res.GetBlock(topic, partition)
|
||||
if fb == nil {
|
||||
res.AddError(topic, partition, ErrNoError)
|
||||
fb = res.GetBlock(topic, partition)
|
||||
}
|
||||
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
return nil
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
return nil
|
||||
}
|
||||
return messages[offset]
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
|
||||
partitions := mfr.messages[topic]
|
||||
if partitions == nil {
|
||||
return 0
|
||||
}
|
||||
messages := partitions[partition]
|
||||
if messages == nil {
|
||||
return 0
|
||||
}
|
||||
return len(messages)
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
|
||||
partitions := mfr.highWaterMarks[topic]
|
||||
if partitions == nil {
|
||||
return 0
|
||||
}
|
||||
return partitions[partition]
|
||||
}
|
||||
|
||||
// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
|
||||
type MockConsumerMetadataResponse struct {
|
||||
coordinators map[string]interface{}
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
|
||||
return &MockConsumerMetadataResponse{
|
||||
coordinators: make(map[string]interface{}),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
|
||||
mr.coordinators[group] = broker
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
|
||||
mr.coordinators[group] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*ConsumerMetadataRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &ConsumerMetadataResponse{}
|
||||
v := mr.coordinators[group]
|
||||
switch v := v.(type) {
|
||||
case *MockBroker:
|
||||
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
|
||||
case KError:
|
||||
res.Err = v
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
|
||||
type MockOffsetCommitResponse struct {
|
||||
errors map[string]map[string]map[int32]KError
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
|
||||
return &MockOffsetCommitResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
|
||||
if mr.errors == nil {
|
||||
mr.errors = make(map[string]map[string]map[int32]KError)
|
||||
}
|
||||
topics := mr.errors[group]
|
||||
if topics == nil {
|
||||
topics = make(map[string]map[int32]KError)
|
||||
mr.errors[group] = topics
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
topics[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*OffsetCommitRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetCommitResponse{}
|
||||
for topic, partitions := range req.blocks {
|
||||
for partition := range partitions {
|
||||
res.AddError(topic, partition, mr.getError(group, topic, partition))
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
|
||||
topics := mr.errors[group]
|
||||
if topics == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
kerror, ok := partitions[partition]
|
||||
if !ok {
|
||||
return ErrNoError
|
||||
}
|
||||
return kerror
|
||||
}
|
||||
|
||||
// MockProduceResponse is a `ProduceResponse` builder.
|
||||
type MockProduceResponse struct {
|
||||
errors map[string]map[int32]KError
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
|
||||
return &MockProduceResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
|
||||
if mr.errors == nil {
|
||||
mr.errors = make(map[string]map[int32]KError)
|
||||
}
|
||||
partitions := mr.errors[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
mr.errors[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*ProduceRequest)
|
||||
res := &ProduceResponse{}
|
||||
for topic, partitions := range req.msgSets {
|
||||
for partition := range partitions {
|
||||
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
|
||||
partitions := mr.errors[topic]
|
||||
if partitions == nil {
|
||||
return ErrNoError
|
||||
}
|
||||
kerror, ok := partitions[partition]
|
||||
if !ok {
|
||||
return ErrNoError
|
||||
}
|
||||
return kerror
|
||||
}
|
||||
|
||||
// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
|
||||
type MockOffsetFetchResponse struct {
|
||||
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
|
||||
return &MockOffsetFetchResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
|
||||
if mr.offsets == nil {
|
||||
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
}
|
||||
topics := mr.offsets[group]
|
||||
if topics == nil {
|
||||
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
mr.offsets[group] = topics
|
||||
}
|
||||
partitions := topics[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*OffsetFetchResponseBlock)
|
||||
topics[topic] = partitions
|
||||
}
|
||||
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
req := reqBody.(*OffsetFetchRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetFetchResponse{}
|
||||
for topic, partitions := range mr.offsets[group] {
|
||||
for partition, block := range partitions {
|
||||
res.AddBlock(topic, partition, block)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
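These builders are meant to be composed with MockBroker.SetHandlerByMap, keyed by the Go type name of the incoming request, so a test can describe broker behaviour declaratively instead of queueing raw responses. A hedged sketch, again from inside the sarama package; the topic name and offsets are illustrative:

// Minimal sketch (would live in a _test.go file inside package sarama).
package sarama

import "testing"

func TestWithMockResponses(t *testing.T) {
	broker := NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]MockResponse{
		// The key is the request struct name as seen by reflect, e.g. "MetadataRequest".
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 10),
	})
	// Requests of any other type are ignored and the caller times out,
	// exactly as described in the MockBroker documentation above.
}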
13
vendor/src/github.com/Shopify/sarama/mocks/README.md
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
# sarama/mocks
|
||||
|
||||
The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
|
||||
You can use them to test your sarama applications using dependency injection.
|
||||
|
||||
The following mock objects are available:
|
||||
|
||||
- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
|
||||
- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
|
||||
- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
|
||||
|
||||
The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
|
||||
and the results will be reported to the `*testing.T` object you provided when creating the mock.
|
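As a concrete illustration of the dependency-injection approach described in this README, the sketch below passes a mock Consumer to a function that only knows about the sarama.Consumer interface; the readOne helper, topic name, and package name are illustrative assumptions, not part of sarama.

package yourapp_test // hypothetical consuming package

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// readOne is illustrative production code that depends only on the
// sarama.Consumer interface, so either a real or a mock consumer can be injected.
func readOne(c sarama.Consumer) ([]byte, error) {
	pc, err := c.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		return nil, err
	}
	msg := <-pc.Messages()
	return msg.Value, pc.Close()
}

func TestReadOne(t *testing.T) {
	consumer := mocks.NewConsumer(t, nil)
	consumer.ExpectConsumePartition("my_topic", 0, sarama.OffsetOldest).
		YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})

	if val, err := readOne(consumer); err != nil || string(val) != "hello" {
		t.Errorf("unexpected result: %q, %v", val, err)
	}
	if err := consumer.Close(); err != nil {
		t.Error(err)
	}
}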
174
vendor/src/github.com/Shopify/sarama/mocks/async_producer.go
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
// AsyncProducer implements sarama's Producer interface for testing purposes.
|
||||
// Before you can send messages to its Input channel, you have to set expectations
|
||||
// so it knows how to handle the input; it returns an error if the number of messages
|
||||
// received is bigger than the number of expectations set. You can also set a
|
||||
// function in each expectation so that the message value is checked by this function
|
||||
// and an error is returned if the match fails.
|
||||
type AsyncProducer struct {
|
||||
l sync.Mutex
|
||||
t ErrorReporter
|
||||
expectations []*producerExpectation
|
||||
closed chan struct{}
|
||||
input chan *sarama.ProducerMessage
|
||||
successes chan *sarama.ProducerMessage
|
||||
errors chan *sarama.ProducerError
|
||||
lastOffset int64
|
||||
}
|
||||
|
||||
// NewAsyncProducer instantiates a new Producer mock. The t argument should
|
||||
// be the *testing.T instance of your test method. An error will be written to it if
|
||||
// an expectation is violated. The config argument is used to determine whether it
|
||||
// should ack successes on the Successes channel.
|
||||
func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
|
||||
if config == nil {
|
||||
config = sarama.NewConfig()
|
||||
}
|
||||
mp := &AsyncProducer{
|
||||
t: t,
|
||||
closed: make(chan struct{}, 0),
|
||||
expectations: make([]*producerExpectation, 0),
|
||||
input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
|
||||
successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
|
||||
errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
close(mp.successes)
|
||||
close(mp.errors)
|
||||
}()
|
||||
|
||||
for msg := range mp.input {
|
||||
mp.l.Lock()
|
||||
if mp.expectations == nil || len(mp.expectations) == 0 {
|
||||
mp.expectations = nil
|
||||
mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
|
||||
} else {
|
||||
expectation := mp.expectations[0]
|
||||
mp.expectations = mp.expectations[1:]
|
||||
if expectation.CheckFunction != nil {
|
||||
if val, err := msg.Value.Encode(); err != nil {
|
||||
mp.t.Errorf("Input message encoding failed: %s", err.Error())
|
||||
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
|
||||
} else {
|
||||
err = expectation.CheckFunction(val)
|
||||
if err != nil {
|
||||
mp.t.Errorf("Check function returned an error: %s", err.Error())
|
||||
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
|
||||
}
|
||||
}
|
||||
}
|
||||
if expectation.Result == errProduceSuccess {
|
||||
mp.lastOffset++
|
||||
if config.Producer.Return.Successes {
|
||||
msg.Offset = mp.lastOffset
|
||||
mp.successes <- msg
|
||||
}
|
||||
} else {
|
||||
if config.Producer.Return.Errors {
|
||||
mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
|
||||
}
|
||||
}
|
||||
}
|
||||
mp.l.Unlock()
|
||||
}
|
||||
|
||||
mp.l.Lock()
|
||||
if len(mp.expectations) > 0 {
|
||||
mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
|
||||
}
|
||||
mp.l.Unlock()
|
||||
|
||||
close(mp.closed)
|
||||
}()
|
||||
|
||||
return mp
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Implement Producer interface
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
|
||||
// By closing a mock producer, you also tell it that no more input will be provided, so it will
|
||||
// write an error to the test state if there's any remaining expectations.
|
||||
func (mp *AsyncProducer) AsyncClose() {
|
||||
close(mp.input)
|
||||
}
|
||||
|
||||
// Close corresponds with the Close method of sarama's Producer implementation.
|
||||
// By closing a mock producer, you also tell it that no more input will be provided, so it will
|
||||
// write an error to the test state if there's any remaining expectations.
|
||||
func (mp *AsyncProducer) Close() error {
|
||||
mp.AsyncClose()
|
||||
<-mp.closed
|
||||
return nil
|
||||
}
|
||||
|
||||
// Input corresponds with the Input method of sarama's Producer implementation.
|
||||
// You have to set expectations on the mock producer before writing messages to the Input
|
||||
// channel, so it knows how to handle them. If there are no remaining expectations and
|
||||
// a message is written to the Input channel, the mock producer will write an error to the test
|
||||
// state object.
|
||||
func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
|
||||
return mp.input
|
||||
}
|
||||
|
||||
// Successes corresponds with the Successes method of sarama's Producer implementation.
|
||||
func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
|
||||
return mp.successes
|
||||
}
|
||||
|
||||
// Errors corresponds with the Errors method of sarama's Producer implementation.
|
||||
func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
|
||||
return mp.errors
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Setting expectations
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
|
||||
// will be provided on the input channel. The mock producer will call the given function to check
|
||||
// the message value. If an error is returned it will be made available on the Errors channel
|
||||
// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
|
||||
// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
|
||||
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
|
||||
mp.l.Lock()
|
||||
defer mp.l.Unlock()
|
||||
mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
|
||||
// will be provided on the input channel. The mock producer will first call the given function to
|
||||
// check the message value. If an error is returned it will be made available on the Errors channel
|
||||
// otherwise the mock will handle the message as if it failed to produce successfully. This means
|
||||
// it will make a ProducerError available on the Errors channel.
|
||||
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
|
||||
mp.l.Lock()
|
||||
defer mp.l.Unlock()
|
||||
mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
|
||||
// on the input channel. The mock producer will handle the message as if it is produced successfully,
|
||||
// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
|
||||
// is set to true.
|
||||
func (mp *AsyncProducer) ExpectInputAndSucceed() {
|
||||
mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
|
||||
}
|
||||
|
||||
// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
|
||||
// on the input channel. The mock producer will handle the message as if it failed to produce
|
||||
// successfully. This means it will make a ProducerError available on the Errors channel.
|
||||
func (mp *AsyncProducer) ExpectInputAndFail(err error) {
|
||||
mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
|
||||
}
|
132
vendor/src/github.com/Shopify/sarama/mocks/async_producer_test.go
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
func generateRegexpChecker(re string) func([]byte) error {
|
||||
return func(val []byte) error {
|
||||
matched, err := regexp.MatchString(re, string(val))
|
||||
if err != nil {
|
||||
return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
|
||||
}
|
||||
if !matched {
|
||||
return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type testReporterMock struct {
|
||||
errors []string
|
||||
}
|
||||
|
||||
func newTestReporterMock() *testReporterMock {
|
||||
return &testReporterMock{errors: make([]string, 0)}
|
||||
}
|
||||
|
||||
func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
|
||||
trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
|
||||
var mp interface{} = &AsyncProducer{}
|
||||
if _, ok := mp.(sarama.AsyncProducer); !ok {
|
||||
t.Error("The mock producer should implement the sarama.Producer interface.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProducerReturnsExpectationsToChannels(t *testing.T) {
|
||||
config := sarama.NewConfig()
|
||||
config.Producer.Return.Successes = true
|
||||
mp := NewAsyncProducer(t, config)
|
||||
|
||||
mp.ExpectInputAndSucceed()
|
||||
mp.ExpectInputAndSucceed()
|
||||
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
|
||||
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
|
||||
|
||||
msg1 := <-mp.Successes()
|
||||
msg2 := <-mp.Successes()
|
||||
err1 := <-mp.Errors()
|
||||
|
||||
if msg1.Topic != "test 1" {
|
||||
t.Error("Expected message 1 to be returned first")
|
||||
}
|
||||
|
||||
if msg2.Topic != "test 2" {
|
||||
t.Error("Expected message 2 to be returned second")
|
||||
}
|
||||
|
||||
if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
|
||||
t.Error("Expected message 3 to be returned as error")
|
||||
}
|
||||
|
||||
if err := mp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProducerWithTooFewExpectations(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
mp := NewAsyncProducer(trm, nil)
|
||||
mp.ExpectInputAndSucceed()
|
||||
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
|
||||
|
||||
if err := mp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProducerWithTooManyExpectations(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
mp := NewAsyncProducer(trm, nil)
|
||||
mp.ExpectInputAndSucceed()
|
||||
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
|
||||
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
|
||||
if err := mp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProducerWithCheckerFunction(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
mp := NewAsyncProducer(trm, nil)
|
||||
mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
|
||||
mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
|
||||
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
if err := mp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(mp.Errors()) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
|
||||
err1 := <-mp.Errors()
|
||||
if !strings.HasPrefix(err1.Err.Error(), "No match") {
|
||||
t.Error("Expected to report a value check error, found: ", err1.Err)
|
||||
}
|
||||
}
|
315
vendor/src/github.com/Shopify/sarama/mocks/consumer.go
vendored
Normal file
@ -0,0 +1,315 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
// Consumer implements sarama's Consumer interface for testing purposes.
|
||||
// Before you can start consuming from this consumer, you have to register
|
||||
// topic/partitions using ExpectConsumePartition, and set expectations on them.
|
||||
type Consumer struct {
|
||||
l sync.Mutex
|
||||
t ErrorReporter
|
||||
config *sarama.Config
|
||||
partitionConsumers map[string]map[int32]*PartitionConsumer
|
||||
metadata map[string][]int32
|
||||
}
|
||||
|
||||
// NewConsumer returns a new mock Consumer instance. The t argument should
|
||||
// be the *testing.T instance of your test method. An error will be written to it if
|
||||
// an expectation is violated. The config argument is currently unused and can be set to nil.
|
||||
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
|
||||
if config == nil {
|
||||
config = sarama.NewConfig()
|
||||
}
|
||||
|
||||
c := &Consumer{
|
||||
t: t,
|
||||
config: config,
|
||||
partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
// Consumer interface implementation
|
||||
///////////////////////////////////////////////////
|
||||
|
||||
// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
|
||||
// Before you can start consuming a partition, you have to set expectations on it using
|
||||
// ExpectConsumePartition. You can only consume a partition once per consumer.
|
||||
func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
|
||||
c.t.Errorf("No expectations set for %s/%d", topic, partition)
|
||||
return nil, errOutOfExpectations
|
||||
}
|
||||
|
||||
pc := c.partitionConsumers[topic][partition]
|
||||
if pc.consumed {
|
||||
return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
|
||||
}
|
||||
|
||||
if pc.offset != AnyOffset && pc.offset != offset {
|
||||
c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
|
||||
}
|
||||
|
||||
pc.consumed = true
|
||||
return pc, nil
|
||||
}
|
||||
|
||||
// Topics returns a list of topics, as registered with SetTopicMetadata.
|
||||
func (c *Consumer) Topics() ([]string, error) {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
if c.metadata == nil {
|
||||
c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
|
||||
return nil, sarama.ErrOutOfBrokers
|
||||
}
|
||||
|
||||
var result []string
|
||||
for topic := range c.metadata {
|
||||
result = append(result, topic)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata.
|
||||
func (c *Consumer) Partitions(topic string) ([]int32, error) {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
if c.metadata == nil {
|
||||
c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
|
||||
return nil, sarama.ErrOutOfBrokers
|
||||
}
|
||||
if c.metadata[topic] == nil {
|
||||
return nil, sarama.ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
return c.metadata[topic], nil
|
||||
}
|
||||
|
||||
func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
|
||||
for topic, partitionConsumers := range c.partitionConsumers {
|
||||
hwm := make(map[int32]int64, len(partitionConsumers))
|
||||
for partition, pc := range partitionConsumers {
|
||||
hwm[partition] = pc.HighWaterMarkOffset()
|
||||
}
|
||||
hwms[topic] = hwm
|
||||
}
|
||||
|
||||
return hwms
|
||||
}
|
||||
|
||||
// Close implements the Close method from the sarama.Consumer interface. It will close
|
||||
// all registered PartitionConsumer instances.
|
||||
func (c *Consumer) Close() error {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
for _, partitions := range c.partitionConsumers {
|
||||
for _, partitionConsumer := range partitions {
|
||||
_ = partitionConsumer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
// Expectation API
|
||||
///////////////////////////////////////////////////
|
||||
|
||||
// SetTopicMetadata sets the cluster's topic/partition metadata,
|
||||
// which will be returned by Topics() and Partitions().
|
||||
func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
c.metadata = metadata
|
||||
}
|
||||
|
||||
// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
|
||||
// The registered PartitionConsumer will be returned, so you can set expectations
|
||||
// on it using method chaining. Once a topic/partition is registered, you are
|
||||
// expected to start consuming it using ConsumePartition. If that doesn't happen,
|
||||
// an error will be written to the error reporter once the mock consumer is closed. It will
|
||||
// also expect that the
|
||||
func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
|
||||
c.l.Lock()
|
||||
defer c.l.Unlock()
|
||||
|
||||
if c.partitionConsumers[topic] == nil {
|
||||
c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
|
||||
}
|
||||
|
||||
if c.partitionConsumers[topic][partition] == nil {
|
||||
c.partitionConsumers[topic][partition] = &PartitionConsumer{
|
||||
t: c.t,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
offset: offset,
|
||||
messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
|
||||
errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
|
||||
}
|
||||
}
|
||||
|
||||
return c.partitionConsumers[topic][partition]
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
// PartitionConsumer mock type
|
||||
///////////////////////////////////////////////////
|
||||
|
||||
// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
|
||||
// It is returned by the mock Consumer's ConsumePartition method, but only if it is
|
||||
// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
|
||||
// Errors and Messages channel, you should specify what values will be provided on these
|
||||
// channels using YieldMessage and YieldError.
|
||||
type PartitionConsumer struct {
|
||||
l sync.Mutex
|
||||
t ErrorReporter
|
||||
topic string
|
||||
partition int32
|
||||
offset int64
|
||||
messages chan *sarama.ConsumerMessage
|
||||
errors chan *sarama.ConsumerError
|
||||
singleClose sync.Once
|
||||
consumed bool
|
||||
errorsShouldBeDrained bool
|
||||
messagesShouldBeDrained bool
|
||||
highWaterMarkOffset int64
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
// PartitionConsumer interface implementation
|
||||
///////////////////////////////////////////////////
|
||||
|
||||
// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
|
||||
func (pc *PartitionConsumer) AsyncClose() {
|
||||
pc.singleClose.Do(func() {
|
||||
close(pc.messages)
|
||||
close(pc.errors)
|
||||
})
|
||||
}
|
||||
|
||||
// Close implements the Close method from the sarama.PartitionConsumer interface. It will
|
||||
// verify whether the partition consumer was actually started.
|
||||
func (pc *PartitionConsumer) Close() error {
|
||||
if !pc.consumed {
|
||||
pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
|
||||
return errPartitionConsumerNotStarted
|
||||
}
|
||||
|
||||
if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
|
||||
pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
|
||||
}
|
||||
|
||||
if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
|
||||
pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
|
||||
}
|
||||
|
||||
pc.AsyncClose()
|
||||
|
||||
var (
|
||||
closeErr error
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
var errs = make(sarama.ConsumerErrors, 0)
|
||||
for err := range pc.errors {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
closeErr = errs
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for _ = range pc.messages {
|
||||
// drain
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
return closeErr
|
||||
}
|
||||
|
||||
// Errors implements the Errors method from the sarama.PartitionConsumer interface.
|
||||
func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
|
||||
return pc.errors
|
||||
}
|
||||
|
||||
// Messages implements the Messages method from the sarama.PartitionConsumer interface.
|
||||
func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
|
||||
return pc.messages
|
||||
}
|
||||
|
||||
func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
|
||||
return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////
|
||||
// Expectation API
|
||||
///////////////////////////////////////////////////
|
||||
|
||||
// YieldMessage will yield a message on the Messages channel of this partition consumer
|
||||
// when it is consumed. By default, the mock consumer will not verify whether this
|
||||
// message was consumed from the Messages channel, because there are legitimate
|
||||
// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
|
||||
// verify that the channel is empty on close.
|
||||
func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
|
||||
pc.l.Lock()
|
||||
defer pc.l.Unlock()
|
||||
|
||||
msg.Topic = pc.topic
|
||||
msg.Partition = pc.partition
|
||||
msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
|
||||
|
||||
pc.messages <- msg
|
||||
}
|
||||
|
||||
// YieldError will yield an error on the Errors channel of this partition consumer
|
||||
// when it is consumed. By default, the mock consumer will not verify whether this error was
|
||||
// consumed from the Errors channel, because there are legitimate reasons for this
|
||||
// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
|
||||
// the channel is empty on close.
|
||||
func (pc *PartitionConsumer) YieldError(err error) {
|
||||
pc.errors <- &sarama.ConsumerError{
|
||||
Topic: pc.topic,
|
||||
Partition: pc.partition,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
|
||||
// that the messages channel will be fully drained when Close is called. If this
|
||||
// expectation is not met, an error is reported to the error reporter.
|
||||
func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
|
||||
pc.messagesShouldBeDrained = true
|
||||
}
|
||||
|
||||
// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
|
||||
// that the errors channel will be fully drained when Close is called. If this
|
||||
// expectation is not met, an error is reported to the error reporter.
|
||||
func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
|
||||
pc.errorsShouldBeDrained = true
|
||||
}
|
249
vendor/src/github.com/Shopify/sarama/mocks/consumer_test.go
vendored
Normal file
@ -0,0 +1,249 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
|
||||
var c interface{} = &Consumer{}
|
||||
if _, ok := c.(sarama.Consumer); !ok {
|
||||
t.Error("The mock consumer should implement the sarama.Consumer interface.")
|
||||
}
|
||||
|
||||
var pc interface{} = &PartitionConsumer{}
|
||||
if _, ok := pc.(sarama.PartitionConsumer); !ok {
|
||||
t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerHandlesExpectations(t *testing.T) {
|
||||
consumer := NewConsumer(t, nil)
|
||||
defer func() {
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
|
||||
consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
|
||||
consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
|
||||
|
||||
pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
test0_msg := <-pc_test0.Messages()
|
||||
if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
|
||||
t.Error("Message was not as expected:", test0_msg)
|
||||
}
|
||||
test0_err := <-pc_test0.Errors()
|
||||
if test0_err.Err != sarama.ErrOutOfBrokers {
|
||||
t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
|
||||
}
|
||||
|
||||
pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
test1_msg := <-pc_test1.Messages()
|
||||
if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
|
||||
t.Error("Message was not as expected:", test1_msg)
|
||||
}
|
||||
|
||||
pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
other0_msg := <-pc_other0.Messages()
|
||||
if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
|
||||
t.Error("Message was not as expected:", other0_msg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
|
||||
consumer := NewConsumer(t, nil)
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
|
||||
|
||||
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-pc.Messages():
|
||||
t.Error("Did not epxect a message on the messages channel.")
|
||||
case err := <-pc.Errors():
|
||||
if err.Err != sarama.ErrOutOfBrokers {
|
||||
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
|
||||
}
|
||||
}
|
||||
|
||||
errs := pc.Close().(sarama.ConsumerErrors)
|
||||
if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers {
|
||||
t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
|
||||
_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
|
||||
if err != errOutOfExpectations {
|
||||
t.Error("Expected ConsumePartition to return errOutOfExpectations")
|
||||
}
|
||||
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error("No error expected on close, but found:", err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Errorf("Expected an expectation failure to be set on the error reporter.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
|
||||
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error("No error expected on close, but found:", err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Errorf("Expected an expectation failure to be set on the error reporter.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
|
||||
_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
|
||||
if err != nil {
|
||||
t.Error("Did not expect error, found:", err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Errorf("Expected an expectation failure to be set on the error reporter.")
|
||||
}
|
||||
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
|
||||
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
|
||||
pcmock.ExpectMessagesDrainedOnClose()
|
||||
|
||||
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// consume first message, not second one
|
||||
<-pc.Messages()
|
||||
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Errorf("Expected an expectation failure to be set on the error reporter.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
|
||||
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
pcmock.YieldError(sarama.ErrInvalidMessage)
|
||||
pcmock.YieldError(sarama.ErrInvalidMessage)
|
||||
pcmock.ExpectErrorsDrainedOnClose()
|
||||
|
||||
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// consume first and second error,
|
||||
<-pc.Errors()
|
||||
<-pc.Errors()
|
||||
|
||||
if err := consumer.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 0 {
|
||||
t.Errorf("Expected no expectation failures to be set on the error reporter.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerTopicMetadata(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
|
||||
consumer.SetTopicMetadata(map[string][]int32{
|
||||
"test1": []int32{0, 1, 2, 3},
|
||||
"test2": []int32{0, 1, 2, 3, 4, 5, 6, 7},
|
||||
})
|
||||
|
||||
topics, err := consumer.Topics()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
sortedTopics := sort.StringSlice(topics)
|
||||
sortedTopics.Sort()
|
||||
if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
|
||||
t.Error("Unexpected topics returned:", sortedTopics)
|
||||
}
|
||||
|
||||
partitions1, err := consumer.Partitions("test1")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(partitions1) != 4 {
|
||||
t.Error("Unexpected partitions returned:", len(partitions1))
|
||||
}
|
||||
|
||||
partitions2, err := consumer.Partitions("test2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(partitions2) != 8 {
|
||||
t.Error("Unexpected partitions returned:", len(partitions2))
|
||||
}
|
||||
|
||||
if len(trm.errors) != 0 {
|
||||
t.Errorf("Expected no expectation failures to be set on the error reporter.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
consumer := NewConsumer(trm, nil)
|
||||
|
||||
if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
|
||||
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Errorf("Expected an expectation failure to be set on the error reporter.")
|
||||
}
|
||||
}
|
48
vendor/src/github.com/Shopify/sarama/mocks/mocks.go
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
Package mocks provides mocks that can be used for testing applications
|
||||
that use Sarama. The mock types provided by this package implement the
|
||||
interfaces Sarama exports, so you can use them for dependency injection
|
||||
in your tests.
|
||||
|
||||
All mock instances require you to set expectations on them before you
can use them. These expectations determine how the mock will behave. If an
expectation is not met, it will make your test fail.
|
||||
|
||||
NOTE: this package currently does not fall under the API stability
|
||||
guarantee of Sarama as it is still considered experimental.
|
||||
*/
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
// ErrorReporter is a simple interface that includes the testing.T methods we use to report
|
||||
// expectation violations when using the mock objects.
|
||||
type ErrorReporter interface {
|
||||
Errorf(string, ...interface{})
|
||||
}
|
||||
|
||||
// ValueChecker is a function type to be set in each expectation of the producer mocks
|
||||
// to check the value passed.
|
||||
type ValueChecker func(val []byte) error
|
||||
|
||||
var (
|
||||
errProduceSuccess error = nil
|
||||
errOutOfExpectations = errors.New("No more expectations set on mock")
|
||||
errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
|
||||
)
|
||||
|
||||
const AnyOffset int64 = -1000
|
||||
|
||||
type producerExpectation struct {
|
||||
Result error
|
||||
CheckFunction ValueChecker
|
||||
}
|
||||
|
||||
type consumerExpectation struct {
|
||||
Err error
|
||||
Msg *sarama.ConsumerMessage
|
||||
}
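
// An illustrative ValueChecker (the name is hypothetical): it rejects empty
// message values and would be handed to a producer mock via
// ExpectSendMessageWithCheckerFunctionAndSucceed in a test.
var nonEmptyValueChecker ValueChecker = func(val []byte) error {
	if len(val) == 0 {
		return errors.New("message value must not be empty")
	}
	return nil
}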
|
148
vendor/src/github.com/Shopify/sarama/mocks/sync_producer.go
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
// SyncProducer implements sarama's SyncProducer interface for testing purposes.
|
||||
// Before you can use it, you have to set expectations on the mock SyncProducer
|
||||
// to tell it how to handle calls to SendMessage, so you can easily test success
|
||||
// and failure scenarios.
|
||||
type SyncProducer struct {
|
||||
l sync.Mutex
|
||||
t ErrorReporter
|
||||
expectations []*producerExpectation
|
||||
lastOffset int64
|
||||
}
|
||||
|
||||
// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
|
||||
// be the *testing.T instance of your test method. An error will be written to it if
|
||||
// an expectation is violated. The config argument is currently unused, but is
|
||||
// maintained to be compatible with the async Producer.
|
||||
func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
|
||||
return &SyncProducer{
|
||||
t: t,
|
||||
expectations: make([]*producerExpectation, 0),
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Implement SyncProducer interface
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
|
||||
// You have to set expectations on the mock producer before calling SendMessage, so it knows
|
||||
// how to handle them. You can set a function in each expectation so that the message value
// is checked by this function and an error is returned if the check fails.
|
||||
// If there are no remaining expectations when SendMessage is called,
|
||||
// the mock producer will write an error to the test state object.
|
||||
func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
|
||||
sp.l.Lock()
|
||||
defer sp.l.Unlock()
|
||||
|
||||
if len(sp.expectations) > 0 {
|
||||
expectation := sp.expectations[0]
|
||||
sp.expectations = sp.expectations[1:]
|
||||
if expectation.CheckFunction != nil {
|
||||
if val, err := msg.Value.Encode(); err != nil {
|
||||
sp.t.Errorf("Input message encoding failed: %s", err.Error())
|
||||
return -1, -1, err
|
||||
} else {
|
||||
err := expectation.CheckFunction(val)
|
||||
if err != nil {
|
||||
sp.t.Errorf("Check function returned an error: %s", err.Error())
|
||||
return -1, -1, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if expectation.Result == errProduceSuccess {
|
||||
sp.lastOffset++
|
||||
msg.Offset = sp.lastOffset
|
||||
return 0, msg.Offset, nil
|
||||
} else {
|
||||
return -1, -1, expectation.Result
|
||||
}
|
||||
} else {
|
||||
sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
|
||||
return -1, -1, errOutOfExpectations
|
||||
}
|
||||
}
|
||||
|
||||
// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
|
||||
// You have to set expectations on the mock producer before calling SendMessages, so it knows
|
||||
// how to handle them. If there are not enough remaining expectations when SendMessages is called,
|
||||
// the mock producer will write an error to the test state object.
|
||||
func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
|
||||
sp.l.Lock()
|
||||
defer sp.l.Unlock()
|
||||
|
||||
if len(sp.expectations) >= len(msgs) {
|
||||
expectations := sp.expectations[0:len(msgs)]
|
||||
sp.expectations = sp.expectations[len(msgs):]
|
||||
|
||||
for _, expectation := range expectations {
|
||||
if expectation.Result != errProduceSuccess {
|
||||
return expectation.Result
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
|
||||
return errOutOfExpectations
|
||||
}
|
||||
}
|
||||
|
||||
// Close corresponds with the Close method of sarama's SyncProducer implementation.
|
||||
// By closing a mock SyncProducer, you also tell it that no more SendMessage calls will follow,
// so it will write an error to the test state if there are any remaining expectations.
|
||||
func (sp *SyncProducer) Close() error {
|
||||
sp.l.Lock()
|
||||
defer sp.l.Unlock()
|
||||
|
||||
if len(sp.expectations) > 0 {
|
||||
sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// Setting expectations
|
||||
////////////////////////////////////////////////
|
||||
|
||||
// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
|
||||
// will be called. The mock producer will first call the given function to check the message value.
|
||||
// It will cascade the error of the function, if any, or handle the message as if it produced
|
||||
// successfully, i.e. by returning a valid partition, and offset, and a nil error.
|
||||
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
|
||||
sp.l.Lock()
|
||||
defer sp.l.Unlock()
|
||||
sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
|
||||
// called. The mock producer will first call the given function to check the message value.
|
||||
// It will cascade the error of the function, if any, or handle the message as if it failed
|
||||
// to produce successfully, i.e. by returning the provided error.
|
||||
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
|
||||
sp.l.Lock()
|
||||
defer sp.l.Unlock()
|
||||
sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
|
||||
}
|
||||
|
||||
// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
|
||||
// called. The mock producer will handle the message as if it produced successfully, i.e. by
|
||||
// returning a valid partition, and offset, and a nil error.
|
||||
func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
|
||||
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
|
||||
}
|
||||
|
||||
// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
|
||||
// called. The mock producer will handle the message as if it failed to produce
|
||||
// successfully, i.e. by returning the provided error.
|
||||
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
|
||||
sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
|
||||
}
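
// A minimal sketch of injecting the mock SyncProducer into code under test.
// The topic and payload are illustrative and the sketch assumes a *testing.T
// named t, as in the tests below.
func TestSketchSyncProducer(t *testing.T) {
	sp := NewSyncProducer(t, nil)
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	// The code under test should only depend on the sarama.SyncProducer interface.
	var producer sarama.SyncProducer = sp

	msg := &sarama.ProducerMessage{Topic: "sketch", Value: sarama.StringEncoder("event")}
	if _, _, err := producer.SendMessage(msg); err != nil {
		t.Error("first send should succeed, found:", err)
	}
	if _, _, err := producer.SendMessage(msg); err != sarama.ErrOutOfBrokers {
		t.Error("second send should fail with ErrOutOfBrokers, found:", err)
	}
	if err := producer.Close(); err != nil {
		t.Error(err)
	}
}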
|
124
vendor/src/github.com/Shopify/sarama/mocks/sync_producer_test.go
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
)
|
||||
|
||||
func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
|
||||
var mp interface{} = &SyncProducer{}
|
||||
if _, ok := mp.(sarama.SyncProducer); !ok {
|
||||
t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
|
||||
sp := NewSyncProducer(t, nil)
|
||||
defer func() {
|
||||
if err := sp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
sp.ExpectSendMessageAndSucceed()
|
||||
sp.ExpectSendMessageAndSucceed()
|
||||
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
|
||||
|
||||
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
|
||||
_, offset, err := sp.SendMessage(msg)
|
||||
if err != nil {
|
||||
t.Errorf("The first message should have been produced successfully, but got %s", err)
|
||||
}
|
||||
if offset != 1 || offset != msg.Offset {
|
||||
t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
|
||||
}
|
||||
|
||||
_, offset, err = sp.SendMessage(msg)
|
||||
if err != nil {
|
||||
t.Errorf("The second message should have been produced successfully, but got %s", err)
|
||||
}
|
||||
if offset != 2 || offset != msg.Offset {
|
||||
t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
|
||||
}
|
||||
|
||||
_, _, err = sp.SendMessage(msg)
|
||||
if err != sarama.ErrOutOfBrokers {
|
||||
t.Errorf("The third message should not have been produced successfully")
|
||||
}
|
||||
|
||||
if err := sp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncProducerWithTooManyExpectations(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
|
||||
sp := NewSyncProducer(trm, nil)
|
||||
sp.ExpectSendMessageAndSucceed()
|
||||
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
|
||||
|
||||
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
if _, _, err := sp.SendMessage(msg); err != nil {
|
||||
t.Error("No error expected on first SendMessage call", err)
|
||||
}
|
||||
|
||||
if err := sp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncProducerWithTooFewExpectations(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
|
||||
sp := NewSyncProducer(trm, nil)
|
||||
sp.ExpectSendMessageAndSucceed()
|
||||
|
||||
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
if _, _, err := sp.SendMessage(msg); err != nil {
|
||||
t.Error("No error expected on first SendMessage call", err)
|
||||
}
|
||||
if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
|
||||
t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
|
||||
}
|
||||
|
||||
if err := sp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncProducerWithCheckerFunction(t *testing.T) {
|
||||
trm := newTestReporterMock()
|
||||
|
||||
sp := NewSyncProducer(trm, nil)
|
||||
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
|
||||
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
|
||||
|
||||
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
if _, _, err := sp.SendMessage(msg); err != nil {
|
||||
t.Error("No error expected on first SendMessage call, found: ", err)
|
||||
}
|
||||
msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
|
||||
if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
|
||||
t.Error("Error during value check expected on second SendMessage call, found:", err)
|
||||
}
|
||||
|
||||
if err := sp.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if len(trm.errors) != 1 {
|
||||
t.Error("Expected to report an error")
|
||||
}
|
||||
}
|
190
vendor/src/github.com/Shopify/sarama/offset_commit_request.go
vendored
Normal file
@@ -0,0 +1,190 @@
|
||||
package sarama
|
||||
|
||||
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
|
||||
// tells the broker to set the timestamp to the time at which the request was received.
|
||||
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
|
||||
const ReceiveTime int64 = -1
|
||||
|
||||
// GroupGenerationUndefined is a special value for the group generation field of
|
||||
// Offset Commit Requests that should be used when a consumer group does not rely
|
||||
// on Kafka for partition management.
|
||||
const GroupGenerationUndefined = -1
|
||||
|
||||
type offsetCommitRequestBlock struct {
|
||||
offset int64
|
||||
timestamp int64
|
||||
metadata string
|
||||
}
|
||||
|
||||
func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt64(b.offset)
|
||||
if version == 1 {
|
||||
pe.putInt64(b.timestamp)
|
||||
} else if b.timestamp != 0 {
|
||||
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
|
||||
}
|
||||
|
||||
return pe.putString(b.metadata)
|
||||
}
|
||||
|
||||
func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
if b.offset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if version == 1 {
|
||||
if b.timestamp, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b.metadata, err = pd.getString()
|
||||
return err
|
||||
}
|
||||
|
||||
type OffsetCommitRequest struct {
|
||||
ConsumerGroup string
|
||||
ConsumerGroupGeneration int32 // v1 or later
|
||||
ConsumerID string // v1 or later
|
||||
RetentionTime int64 // v2 or later
|
||||
|
||||
// Version can be:
|
||||
// - 0 (kafka 0.8.1 and later)
|
||||
// - 1 (kafka 0.8.2 and later)
|
||||
// - 2 (kafka 0.9.0 and later)
|
||||
Version int16
|
||||
blocks map[string]map[int32]*offsetCommitRequestBlock
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
|
||||
if r.Version < 0 || r.Version > 2 {
|
||||
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
|
||||
}
|
||||
|
||||
if err := pe.putString(r.ConsumerGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
pe.putInt32(r.ConsumerGroupGeneration)
|
||||
if err := pe.putString(r.ConsumerID); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if r.ConsumerGroupGeneration != 0 {
|
||||
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
|
||||
}
|
||||
if r.ConsumerID != "" {
|
||||
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 2 {
|
||||
pe.putInt64(r.RetentionTime)
|
||||
} else if r.RetentionTime != 0 {
|
||||
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.blocks {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err := block.encode(pe, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.ConsumerGroup, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.ConsumerID, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 2 {
|
||||
if r.RetentionTime, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &offsetCommitRequestBlock{}
|
||||
if err := block.decode(pd, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = block
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) key() int16 {
|
||||
return 8
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_8_2_0
|
||||
case 2:
|
||||
return V0_9_0_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
|
||||
}
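
// A hedged sketch of building and sending a commit with AddBlock. The broker b
// is assumed to be an already-connected *Broker acting as the group coordinator;
// the group, topic, offset and metadata values are illustrative.
func commitOffsetSketch(b *Broker) error {
	req := &OffsetCommitRequest{
		Version:                 2, // per the Version comment above, v2 needs Kafka 0.9.0+
		ConsumerGroup:           "example-group",
		ConsumerGroupGeneration: GroupGenerationUndefined,
		RetentionTime:           86400000, // retention in milliseconds, only encoded for v2
	}
	req.AddBlock("example-topic", 0, 42, 0, "example-metadata")

	resp, err := b.CommitOffset(req)
	if err != nil {
		return err
	}
	if kerr, ok := resp.Errors["example-topic"][0]; ok && kerr != ErrNoError {
		return kerr
	}
	return nil
}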
|
90
vendor/src/github.com/Shopify/sarama/offset_commit_request_test.go
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
offsetCommitRequestNoBlocksV0 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetCommitRequestNoBlocksV1 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x11, 0x22,
|
||||
0x00, 0x04, 'c', 'o', 'n', 's',
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetCommitRequestNoBlocksV2 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x11, 0x22,
|
||||
0x00, 0x04, 'c', 'o', 'n', 's',
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetCommitRequestOneBlockV0 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x52, 0x21,
|
||||
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
|
||||
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
|
||||
|
||||
offsetCommitRequestOneBlockV1 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x11, 0x22,
|
||||
0x00, 0x04, 'c', 'o', 'n', 's',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x52, 0x21,
|
||||
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
|
||||
|
||||
offsetCommitRequestOneBlockV2 = []byte{
|
||||
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x11, 0x22,
|
||||
0x00, 0x04, 'c', 'o', 'n', 's',
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x52, 0x21,
|
||||
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
|
||||
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
|
||||
)
|
||||
|
||||
func TestOffsetCommitRequestV0(t *testing.T) {
|
||||
request := new(OffsetCommitRequest)
|
||||
request.Version = 0
|
||||
request.ConsumerGroup = "foobar"
|
||||
testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)
|
||||
|
||||
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
|
||||
testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
|
||||
}
|
||||
|
||||
func TestOffsetCommitRequestV1(t *testing.T) {
|
||||
request := new(OffsetCommitRequest)
|
||||
request.ConsumerGroup = "foobar"
|
||||
request.ConsumerID = "cons"
|
||||
request.ConsumerGroupGeneration = 0x1122
|
||||
request.Version = 1
|
||||
testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)
|
||||
|
||||
request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
|
||||
testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
|
||||
}
|
||||
|
||||
func TestOffsetCommitRequestV2(t *testing.T) {
|
||||
request := new(OffsetCommitRequest)
|
||||
request.ConsumerGroup = "foobar"
|
||||
request.ConsumerID = "cons"
|
||||
request.ConsumerGroupGeneration = 0x1122
|
||||
request.RetentionTime = 0x4433
|
||||
request.Version = 2
|
||||
testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)
|
||||
|
||||
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
|
||||
testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
|
||||
}
|
85
vendor/src/github.com/Shopify/sarama/offset_commit_response.go
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package sarama
|
||||
|
||||
type OffsetCommitResponse struct {
|
||||
Errors map[string]map[int32]KError
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
|
||||
if r.Errors == nil {
|
||||
r.Errors = make(map[string]map[int32]KError)
|
||||
}
|
||||
partitions := r.Errors[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]KError)
|
||||
r.Errors[topic] = partitions
|
||||
}
|
||||
partitions[partition] = kerror
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Errors)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.Errors {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, kerror := range partitions {
|
||||
pe.putInt32(partition)
|
||||
pe.putInt16(int16(kerror))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil || numTopics == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors = make(map[string]map[int32]KError, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numErrors, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors[name] = make(map[int32]KError, numErrors)
|
||||
|
||||
for j := 0; j < numErrors; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Errors[name][id] = KError(tmp)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) key() int16 {
|
||||
return 8
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
24
vendor/src/github.com/Shopify/sarama/offset_commit_response_test.go
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyOffsetCommitResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestEmptyOffsetCommitResponse(t *testing.T) {
|
||||
response := OffsetCommitResponse{}
|
||||
testResponse(t, "empty", &response, emptyOffsetCommitResponse)
|
||||
}
|
||||
|
||||
func TestNormalOffsetCommitResponse(t *testing.T) {
|
||||
response := OffsetCommitResponse{}
|
||||
response.AddError("t", 0, ErrNotLeaderForPartition)
|
||||
response.Errors["m"] = make(map[int32]KError)
|
||||
// The response encoded form cannot be checked for it varies due to
|
||||
// unpredictable map traversal order.
|
||||
testResponse(t, "normal", &response, nil)
|
||||
}
|
81
vendor/src/github.com/Shopify/sarama/offset_fetch_request.go
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package sarama
|
||||
|
||||
type OffsetFetchRequest struct {
|
||||
ConsumerGroup string
|
||||
Version int16
|
||||
partitions map[string][]int32
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
|
||||
if r.Version < 0 || r.Version > 1 {
|
||||
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
|
||||
}
|
||||
|
||||
if err = pe.putString(r.ConsumerGroup); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putArrayLength(len(r.partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.partitions {
|
||||
if err = pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putInt32Array(partitions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
if r.ConsumerGroup, err = pd.getString(); err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if partitionCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.partitions = make(map[string][]int32)
|
||||
for i := 0; i < partitionCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitions, err := pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.partitions[topic] = partitions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) key() int16 {
|
||||
return 9
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_8_2_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
|
||||
if r.partitions == nil {
|
||||
r.partitions = make(map[string][]int32)
|
||||
}
|
||||
|
||||
r.partitions[topic] = append(r.partitions[topic], partitionID)
|
||||
}
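
// A hedged sketch of reading a committed offset back with FetchOffset, mirroring
// what the offset manager below does when it starts up. The broker b is assumed
// to be the group coordinator; error handling is trimmed to the essentials.
func fetchCommittedOffsetSketch(b *Broker, group, topic string, partition int32) (int64, string, error) {
	req := &OffsetFetchRequest{Version: 1, ConsumerGroup: group}
	req.AddPartition(topic, partition)

	resp, err := b.FetchOffset(req)
	if err != nil {
		return 0, "", err
	}
	block := resp.GetBlock(topic, partition)
	if block == nil {
		return 0, "", ErrIncompleteResponse
	}
	if block.Err != ErrNoError {
		return 0, "", block.Err
	}
	return block.Offset, block.Metadata, nil
}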
|
31
vendor/src/github.com/Shopify/sarama/offset_fetch_request_test.go
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
offsetFetchRequestNoGroupNoPartitions = []byte{
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetFetchRequestNoPartitions = []byte{
|
||||
0x00, 0x04, 'b', 'l', 'a', 'h',
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetFetchRequestOnePartition = []byte{
|
||||
0x00, 0x04, 'b', 'l', 'a', 'h',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x4F, 0x4F, 0x4F, 0x4F}
|
||||
)
|
||||
|
||||
func TestOffsetFetchRequest(t *testing.T) {
|
||||
request := new(OffsetFetchRequest)
|
||||
testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
|
||||
|
||||
request.ConsumerGroup = "blah"
|
||||
testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)
|
||||
|
||||
request.AddPartition("topicTheFirst", 0x4F4F4F4F)
|
||||
testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
|
||||
}
|
143
vendor/src/github.com/Shopify/sarama/offset_fetch_response.go
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package sarama
|
||||
|
||||
type OffsetFetchResponseBlock struct {
|
||||
Offset int64
|
||||
Metadata string
|
||||
Err KError
|
||||
}
|
||||
|
||||
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
|
||||
b.Offset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.Metadata, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
|
||||
pe.putInt64(b.Offset)
|
||||
|
||||
err = pe.putString(b.Metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetFetchResponse struct {
|
||||
Blocks map[string]map[int32]*OffsetFetchResponseBlock
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.Blocks {
|
||||
if err := pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err := block.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil || numTopics == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if numBlocks == 0 {
|
||||
r.Blocks[name] = nil
|
||||
continue
|
||||
}
|
||||
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(OffsetFetchResponseBlock)
|
||||
err = block.decode(pd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) key() int16 {
|
||||
return 9
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
|
||||
return minVersion
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
|
||||
}
|
||||
partitions := r.Blocks[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*OffsetFetchResponseBlock)
|
||||
r.Blocks[topic] = partitions
|
||||
}
|
||||
partitions[partition] = block
|
||||
}
|
22
vendor/src/github.com/Shopify/sarama/offset_fetch_response_test.go
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyOffsetFetchResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
)
|
||||
|
||||
func TestEmptyOffsetFetchResponse(t *testing.T) {
|
||||
response := OffsetFetchResponse{}
|
||||
testResponse(t, "empty", &response, emptyOffsetFetchResponse)
|
||||
}
|
||||
|
||||
func TestNormalOffsetFetchResponse(t *testing.T) {
|
||||
response := OffsetFetchResponse{}
|
||||
response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut})
|
||||
response.Blocks["m"] = nil
|
||||
// The response encoded form cannot be checked for it varies due to
|
||||
// unpredictable map traversal order.
|
||||
testResponse(t, "normal", &response, nil)
|
||||
}
|
542
vendor/src/github.com/Shopify/sarama/offset_manager.go
vendored
Normal file
@@ -0,0 +1,542 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Offset Manager
|
||||
|
||||
// OffsetManager uses Kafka to store and fetch consumed partition offsets.
|
||||
type OffsetManager interface {
|
||||
// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
|
||||
// It will return an error if this OffsetManager is already managing the given
|
||||
// topic/partition.
|
||||
ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
|
||||
|
||||
// Close stops the OffsetManager from managing offsets. It is required to call
|
||||
// this function before an OffsetManager object passes out of scope, as it
|
||||
// will otherwise leak memory. You must call this after all the
|
||||
// PartitionOffsetManagers are closed.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type offsetManager struct {
|
||||
client Client
|
||||
conf *Config
|
||||
group string
|
||||
|
||||
lock sync.Mutex
|
||||
poms map[string]map[int32]*partitionOffsetManager
|
||||
boms map[*Broker]*brokerOffsetManager
|
||||
}
|
||||
|
||||
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
|
||||
// It is still necessary to call Close() on the underlying client when finished with the partition manager.
|
||||
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
om := &offsetManager{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
group: group,
|
||||
poms: make(map[string]map[int32]*partitionOffsetManager),
|
||||
boms: make(map[*Broker]*brokerOffsetManager),
|
||||
}
|
||||
|
||||
return om, nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
|
||||
pom, err := om.newPartitionOffsetManager(topic, partition)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
topicManagers := om.poms[topic]
|
||||
if topicManagers == nil {
|
||||
topicManagers = make(map[int32]*partitionOffsetManager)
|
||||
om.poms[topic] = topicManagers
|
||||
}
|
||||
|
||||
if topicManagers[partition] != nil {
|
||||
return nil, ConfigurationError("That topic/partition is already being managed")
|
||||
}
|
||||
|
||||
topicManagers[partition] = pom
|
||||
return pom, nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
bom := om.boms[broker]
|
||||
if bom == nil {
|
||||
bom = om.newBrokerOffsetManager(broker)
|
||||
om.boms[broker] = bom
|
||||
}
|
||||
|
||||
bom.refs++
|
||||
|
||||
return bom
|
||||
}
|
||||
|
||||
func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
bom.refs--
|
||||
|
||||
if bom.refs == 0 {
|
||||
close(bom.updateSubscriptions)
|
||||
if om.boms[bom.broker] == bom {
|
||||
delete(om.boms, bom.broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
delete(om.boms, bom.broker)
|
||||
}
|
||||
|
||||
func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
|
||||
om.lock.Lock()
|
||||
defer om.lock.Unlock()
|
||||
|
||||
delete(om.poms[pom.topic], pom.partition)
|
||||
if len(om.poms[pom.topic]) == 0 {
|
||||
delete(om.poms, pom.topic)
|
||||
}
|
||||
}
|
||||
|
||||
// Partition Offset Manager
|
||||
|
||||
// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
|
||||
// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes
|
||||
// out of scope.
|
||||
type PartitionOffsetManager interface {
|
||||
// NextOffset returns the next offset that should be consumed for the managed
|
||||
// partition, accompanied by metadata which can be used to reconstruct the state
|
||||
// of the partition consumer when it resumes. NextOffset() will return
|
||||
// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
|
||||
// was committed for this partition yet.
|
||||
NextOffset() (int64, string)
|
||||
|
||||
// MarkOffset marks the provided offset, alongside a metadata string
|
||||
// that represents the state of the partition consumer at that point in time. The
|
||||
// metadata string can be used by another consumer to restore that state, so it
|
||||
// can resume consumption.
|
||||
//
|
||||
// To follow upstream conventions, you are expected to mark the offset of the
|
||||
// next message to read, not the last message read. Thus, when calling `MarkOffset`
|
||||
// you should typically add one to the offset of the last consumed message.
|
||||
//
|
||||
// Note: calling MarkOffset does not necessarily commit the offset to the backend
|
||||
// store immediately for efficiency reasons, and it may never be committed if
|
||||
// your application crashes. This means that you may end up processing the same
|
||||
// message twice, and your processing should ideally be idempotent.
|
||||
MarkOffset(offset int64, metadata string)
|
||||
|
||||
// Errors returns a read channel of errors that occur during offset management, if
|
||||
// enabled. By default, errors are logged and not returned over this channel. If
|
||||
// you want to implement any custom error handling, set your config's
|
||||
// Consumer.Return.Errors setting to true, and read from this channel.
|
||||
Errors() <-chan *ConsumerError
|
||||
|
||||
// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
|
||||
// return immediately, after which you should wait until the 'errors' channel has
|
||||
// been drained and closed. It is required to call this function, or Close before
|
||||
// a consumer object passes out of scope, as it will otherwise leak memory. You
|
||||
// must call this before calling Close on the underlying client.
|
||||
AsyncClose()
|
||||
|
||||
// Close stops the PartitionOffsetManager from managing offsets. It is required to
|
||||
// call this function (or AsyncClose) before a PartitionOffsetManager object
|
||||
// passes out of scope, as it will otherwise leak memory. You must call this
|
||||
// before calling Close on the underlying client.
|
||||
Close() error
|
||||
}
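
// A minimal sketch of the lifecycle described above: create an OffsetManager
// from an open Client, manage one partition, resume from the stored offset and
// mark progress. Group and topic names are illustrative; error handling is trimmed.
func offsetManagerSketch(client Client) error {
	om, err := NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		return err
	}
	pom, err := om.ManagePartition("example-topic", 0)
	if err != nil {
		return err
	}

	offset, _ := pom.NextOffset() // metadata is ignored in this sketch

	// ... consume starting at offset, then mark the *next* offset to read:
	pom.MarkOffset(offset+1, "")

	if err := pom.Close(); err != nil { // close the PartitionOffsetManager before the OffsetManager
		return err
	}
	return om.Close()
}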
|
||||
|
||||
type partitionOffsetManager struct {
|
||||
parent *offsetManager
|
||||
topic string
|
||||
partition int32
|
||||
|
||||
lock sync.Mutex
|
||||
offset int64
|
||||
metadata string
|
||||
dirty bool
|
||||
clean sync.Cond
|
||||
broker *brokerOffsetManager
|
||||
|
||||
errors chan *ConsumerError
|
||||
rebalance chan none
|
||||
dying chan none
|
||||
}
|
||||
|
||||
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
|
||||
pom := &partitionOffsetManager{
|
||||
parent: om,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
|
||||
rebalance: make(chan none, 1),
|
||||
dying: make(chan none),
|
||||
}
|
||||
pom.clean.L = &pom.lock
|
||||
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pom.broker.updateSubscriptions <- pom
|
||||
|
||||
go withRecover(pom.mainLoop)
|
||||
|
||||
return pom, nil
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) mainLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-pom.rebalance:
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
pom.handleError(err)
|
||||
pom.rebalance <- none{}
|
||||
} else {
|
||||
pom.broker.updateSubscriptions <- pom
|
||||
}
|
||||
case <-pom.dying:
|
||||
if pom.broker != nil {
|
||||
select {
|
||||
case <-pom.rebalance:
|
||||
case pom.broker.updateSubscriptions <- pom:
|
||||
}
|
||||
pom.parent.unrefBrokerOffsetManager(pom.broker)
|
||||
}
|
||||
pom.parent.abandonPartitionOffsetManager(pom)
|
||||
close(pom.errors)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) selectBroker() error {
|
||||
if pom.broker != nil {
|
||||
pom.parent.unrefBrokerOffsetManager(pom.broker)
|
||||
pom.broker = nil
|
||||
}
|
||||
|
||||
var broker *Broker
|
||||
var err error
|
||||
|
||||
if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pom.broker = pom.parent.refBrokerOffsetManager(broker)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
|
||||
request := new(OffsetFetchRequest)
|
||||
request.Version = 1
|
||||
request.ConsumerGroup = pom.parent.group
|
||||
request.AddPartition(pom.topic, pom.partition)
|
||||
|
||||
response, err := pom.broker.broker.FetchOffset(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := response.GetBlock(pom.topic, pom.partition)
|
||||
if block == nil {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
case ErrNoError:
|
||||
pom.offset = block.Offset
|
||||
pom.metadata = block.Metadata
|
||||
return nil
|
||||
case ErrNotCoordinatorForConsumer:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
if err := pom.selectBroker(); err != nil {
|
||||
return err
|
||||
}
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
if retries <= 0 {
|
||||
return block.Err
|
||||
}
|
||||
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
|
||||
return pom.fetchInitialOffset(retries - 1)
|
||||
default:
|
||||
return block.Err
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) handleError(err error) {
|
||||
cErr := &ConsumerError{
|
||||
Topic: pom.topic,
|
||||
Partition: pom.partition,
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if pom.parent.conf.Consumer.Return.Errors {
|
||||
pom.errors <- cErr
|
||||
} else {
|
||||
Logger.Println(cErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
|
||||
return pom.errors
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if offset > pom.offset {
|
||||
pom.offset = offset
|
||||
pom.metadata = metadata
|
||||
pom.dirty = true
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset == offset && pom.metadata == metadata {
|
||||
pom.dirty = false
|
||||
pom.clean.Signal()
|
||||
}
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
if pom.offset >= 0 {
|
||||
return pom.offset, pom.metadata
|
||||
}
|
||||
|
||||
return pom.parent.conf.Consumer.Offsets.Initial, ""
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) AsyncClose() {
|
||||
go func() {
|
||||
pom.lock.Lock()
|
||||
defer pom.lock.Unlock()
|
||||
|
||||
for pom.dirty {
|
||||
pom.clean.Wait()
|
||||
}
|
||||
|
||||
close(pom.dying)
|
||||
}()
|
||||
}
|
||||
|
||||
func (pom *partitionOffsetManager) Close() error {
|
||||
pom.AsyncClose()
|
||||
|
||||
var errors ConsumerErrors
|
||||
for err := range pom.errors {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Broker Offset Manager
|
||||
|
||||
type brokerOffsetManager struct {
|
||||
parent *offsetManager
|
||||
broker *Broker
|
||||
timer *time.Ticker
|
||||
updateSubscriptions chan *partitionOffsetManager
|
||||
subscriptions map[*partitionOffsetManager]none
|
||||
refs int
|
||||
}
|
||||
|
||||
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
|
||||
bom := &brokerOffsetManager{
|
||||
parent: om,
|
||||
broker: broker,
|
||||
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
|
||||
updateSubscriptions: make(chan *partitionOffsetManager),
|
||||
subscriptions: make(map[*partitionOffsetManager]none),
|
||||
}
|
||||
|
||||
go withRecover(bom.mainLoop)
|
||||
|
||||
return bom
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) mainLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-bom.timer.C:
|
||||
if len(bom.subscriptions) > 0 {
|
||||
bom.flushToBroker()
|
||||
}
|
||||
case s, ok := <-bom.updateSubscriptions:
|
||||
if !ok {
|
||||
bom.timer.Stop()
|
||||
return
|
||||
}
|
||||
if _, ok := bom.subscriptions[s]; ok {
|
||||
delete(bom.subscriptions, s)
|
||||
} else {
|
||||
bom.subscriptions[s] = none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) flushToBroker() {
|
||||
request := bom.constructRequest()
|
||||
if request == nil {
|
||||
return
|
||||
}
|
||||
|
||||
response, err := bom.broker.CommitOffset(request)
|
||||
|
||||
if err != nil {
|
||||
bom.abort(err)
|
||||
return
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var err KError
|
||||
var ok bool
|
||||
|
||||
if response.Errors[s.topic] == nil {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
if err, ok = response.Errors[s.topic][s.partition]; !ok {
|
||||
s.handleError(ErrIncompleteResponse)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
continue
|
||||
}
|
||||
|
||||
switch err {
|
||||
case ErrNoError:
|
||||
block := request.blocks[s.topic][s.partition]
|
||||
s.updateCommitted(block.offset, block.metadata)
|
||||
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
|
||||
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
|
||||
// not a critical error, we just need to redispatch
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
|
||||
// nothing we can do about this, just tell the user and carry on
|
||||
s.handleError(err)
|
||||
case ErrOffsetsLoadInProgress:
|
||||
// nothing wrong but we didn't commit, we'll get it next time round
|
||||
break
|
||||
case ErrUnknownTopicOrPartition:
|
||||
// let the user know *and* try redispatching - if topic-auto-create is
|
||||
// enabled, redispatching should trigger a metadata request and create the
|
||||
// topic; if not then re-dispatching won't help, but we've let the user
|
||||
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
|
||||
fallthrough
|
||||
default:
|
||||
// dunno, tell the user and try redispatching
|
||||
s.handleError(err)
|
||||
delete(bom.subscriptions, s)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
|
||||
var r *OffsetCommitRequest
|
||||
var perPartitionTimestamp int64
|
||||
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
|
||||
perPartitionTimestamp = ReceiveTime
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 1,
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
} else {
|
||||
r = &OffsetCommitRequest{
|
||||
Version: 2,
|
||||
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
|
||||
ConsumerGroup: bom.parent.group,
|
||||
ConsumerGroupGeneration: GroupGenerationUndefined,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for s := range bom.subscriptions {
|
||||
s.lock.Lock()
|
||||
if s.dirty {
|
||||
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(r.blocks) > 0 {
|
||||
return r
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bom *brokerOffsetManager) abort(err error) {
|
||||
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
|
||||
bom.parent.abandonBroker(bom)
|
||||
|
||||
for pom := range bom.subscriptions {
|
||||
pom.handleError(err)
|
||||
pom.rebalance <- none{}
|
||||
}
|
||||
|
||||
for s := range bom.updateSubscriptions {
|
||||
if _, ok := bom.subscriptions[s]; !ok {
|
||||
s.handleError(err)
|
||||
s.rebalance <- none{}
|
||||
}
|
||||
}
|
||||
|
||||
bom.subscriptions = make(map[*partitionOffsetManager]none)
|
||||
}
|
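The offset-manager plumbing above is driven from application code through the exported API exercised by the tests in the next file. A minimal sketch, assuming an already-connected sarama Client and using only names that appear in this diff (NewOffsetManagerFromClient, ManagePartition, NextOffset, MarkOffset, Close); the function name and error handling are illustrative, not part of the vendored code:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// consumeWithManagedOffsets is an illustrative sketch of driving the offset
// manager defined above; it is not part of the vendored file.
func consumeWithManagedOffsets(client sarama.Client) error {
	om, err := sarama.NewOffsetManagerFromClient("group", client)
	if err != nil {
		return err
	}

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		return err
	}

	// NextOffset returns the last marked offset and its metadata, or
	// Consumer.Offsets.Initial when nothing has been stored yet.
	offset, metadata := pom.NextOffset()
	log.Printf("resuming at offset %d (metadata %q)", offset, metadata)

	// ... consume from `offset` ... then record progress; the broker offset
	// manager flushes dirty offsets every Consumer.Offsets.CommitInterval.
	pom.MarkOffset(offset+1, "processed")

	// Close waits (via AsyncClose) for the dirty offset to be committed.
	if err := pom.Close(); err != nil {
		return err
	}
	return om.Close()
}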
369
vendor/src/github.com/Shopify/sarama/offset_manager_test.go
vendored
Normal file
@ -0,0 +1,369 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func initOffsetManager(t *testing.T) (om OffsetManager,
|
||||
testClient Client, broker, coordinator *MockBroker) {
|
||||
|
||||
config := NewConfig()
|
||||
config.Metadata.Retry.Max = 1
|
||||
config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond
|
||||
config.Version = V0_9_0_0
|
||||
|
||||
broker = NewMockBroker(t, 1)
|
||||
coordinator = NewMockBroker(t, 2)
|
||||
|
||||
seedMeta := new(MetadataResponse)
|
||||
seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID())
|
||||
seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError)
|
||||
seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError)
|
||||
broker.Returns(seedMeta)
|
||||
|
||||
var err error
|
||||
testClient, err = NewClient([]string{broker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: coordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: coordinator.Port(),
|
||||
})
|
||||
|
||||
om, err = NewOffsetManagerFromClient("group", testClient)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return om, testClient, broker, coordinator
|
||||
}
|
||||
|
||||
func initPartitionOffsetManager(t *testing.T, om OffsetManager,
|
||||
coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {
|
||||
|
||||
fetchResponse := new(OffsetFetchResponse)
|
||||
fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
|
||||
Err: ErrNoError,
|
||||
Offset: initialOffset,
|
||||
Metadata: metadata,
|
||||
})
|
||||
coordinator.Returns(fetchResponse)
|
||||
|
||||
pom, err := om.ManagePartition("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return pom
|
||||
}
|
||||
|
||||
func TestNewOffsetManager(t *testing.T) {
|
||||
seedBroker := NewMockBroker(t, 1)
|
||||
seedBroker.Returns(new(MetadataResponse))
|
||||
|
||||
testClient, err := NewClient([]string{seedBroker.Addr()}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = NewOffsetManagerFromClient("group", testClient)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
safeClose(t, testClient)
|
||||
|
||||
_, err = NewOffsetManagerFromClient("group", testClient)
|
||||
if err != ErrClosedClient {
|
||||
t.Errorf("Error expected for closed client; actual value: %v", err)
|
||||
}
|
||||
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
// Test recovery from ErrNotCoordinatorForConsumer
|
||||
// on first fetchInitialOffset call
|
||||
func TestOffsetManagerFetchInitialFail(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
|
||||
// Error on first fetchInitialOffset call
|
||||
responseBlock := OffsetFetchResponseBlock{
|
||||
Err: ErrNotCoordinatorForConsumer,
|
||||
Offset: 5,
|
||||
Metadata: "test_meta",
|
||||
}
|
||||
|
||||
fetchResponse := new(OffsetFetchResponse)
|
||||
fetchResponse.AddBlock("my_topic", 0, &responseBlock)
|
||||
coordinator.Returns(fetchResponse)
|
||||
|
||||
// Refresh coordinator
|
||||
newCoordinator := NewMockBroker(t, 3)
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
// Second fetchInitialOffset call is fine
|
||||
fetchResponse2 := new(OffsetFetchResponse)
|
||||
responseBlock2 := responseBlock
|
||||
responseBlock2.Err = ErrNoError
|
||||
fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
|
||||
newCoordinator.Returns(fetchResponse2)
|
||||
|
||||
pom, err := om.ManagePartition("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
newCoordinator.Close()
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress
|
||||
func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
|
||||
// Error on first fetchInitialOffset call
|
||||
responseBlock := OffsetFetchResponseBlock{
|
||||
Err: ErrOffsetsLoadInProgress,
|
||||
Offset: 5,
|
||||
Metadata: "test_meta",
|
||||
}
|
||||
|
||||
fetchResponse := new(OffsetFetchResponse)
|
||||
fetchResponse.AddBlock("my_topic", 0, &responseBlock)
|
||||
coordinator.Returns(fetchResponse)
|
||||
|
||||
// Second fetchInitialOffset call is fine
|
||||
fetchResponse2 := new(OffsetFetchResponse)
|
||||
responseBlock2 := responseBlock
|
||||
responseBlock2.Err = ErrNoError
|
||||
fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
|
||||
coordinator.Returns(fetchResponse2)
|
||||
|
||||
pom, err := om.ManagePartition("my_topic", 0)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerInitialOffset(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
testClient.Config().Consumer.Offsets.Initial = OffsetOldest
|
||||
|
||||
// Kafka returns -1 if no offset has been stored for this partition yet.
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, -1, "")
|
||||
|
||||
offset, meta := pom.NextOffset()
|
||||
if offset != OffsetOldest {
|
||||
t.Errorf("Expected offset 5. Actual: %v", offset)
|
||||
}
|
||||
if meta != "" {
|
||||
t.Errorf("Expected metadata to be empty. Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerNextOffset(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta")
|
||||
|
||||
offset, meta := pom.NextOffset()
|
||||
if offset != 5 {
|
||||
t.Errorf("Expected offset 5. Actual: %v", offset)
|
||||
}
|
||||
if meta != "test_meta" {
|
||||
t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
coordinator.Returns(ocResponse)
|
||||
|
||||
pom.MarkOffset(100, "modified_meta")
|
||||
offset, meta := pom.NextOffset()
|
||||
|
||||
if offset != 100 {
|
||||
t.Errorf("Expected offset 100. Actual: %v", offset)
|
||||
}
|
||||
if meta != "modified_meta" {
|
||||
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
testClient.Config().Consumer.Offsets.Retention = time.Hour
|
||||
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
handler := func(req *request) (res encoder) {
|
||||
if req.body.version() != 2 {
|
||||
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
|
||||
}
|
||||
offsetCommitRequest := req.body.(*OffsetCommitRequest)
|
||||
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
|
||||
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
|
||||
}
|
||||
return ocResponse
|
||||
}
|
||||
coordinator.setHandler(handler)
|
||||
|
||||
pom.MarkOffset(100, "modified_meta")
|
||||
offset, meta := pom.NextOffset()
|
||||
|
||||
if offset != 100 {
|
||||
t.Errorf("Expected offset 100. Actual: %v", offset)
|
||||
}
|
||||
if meta != "modified_meta" {
|
||||
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
|
||||
}
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
}
|
||||
|
||||
func TestPartitionOffsetManagerCommitErr(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
|
||||
|
||||
// Error on one partition
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
|
||||
ocResponse.AddError("my_topic", 1, ErrNoError)
|
||||
coordinator.Returns(ocResponse)
|
||||
|
||||
newCoordinator := NewMockBroker(t, 3)
|
||||
|
||||
// For RefreshCoordinator()
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
// Nothing in response.Errors at all
|
||||
ocResponse2 := new(OffsetCommitResponse)
|
||||
newCoordinator.Returns(ocResponse2)
|
||||
|
||||
// For RefreshCoordinator()
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
// Error on the wrong partition for this pom
|
||||
ocResponse3 := new(OffsetCommitResponse)
|
||||
ocResponse3.AddError("my_topic", 1, ErrNoError)
|
||||
newCoordinator.Returns(ocResponse3)
|
||||
|
||||
// For RefreshCoordinator()
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
// ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block
|
||||
ocResponse4 := new(OffsetCommitResponse)
|
||||
ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition)
|
||||
newCoordinator.Returns(ocResponse4)
|
||||
|
||||
// For RefreshCoordinator()
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
// Normal error response
|
||||
ocResponse5 := new(OffsetCommitResponse)
|
||||
ocResponse5.AddError("my_topic", 0, ErrNoError)
|
||||
newCoordinator.Returns(ocResponse5)
|
||||
|
||||
pom.MarkOffset(100, "modified_meta")
|
||||
|
||||
err := pom.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
broker.Close()
|
||||
coordinator.Close()
|
||||
newCoordinator.Close()
|
||||
safeClose(t, om)
|
||||
safeClose(t, testClient)
|
||||
}
|
||||
|
||||
// Test of recovery from abort
|
||||
func TestAbortPartitionOffsetManager(t *testing.T) {
|
||||
om, testClient, broker, coordinator := initOffsetManager(t)
|
||||
pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
|
||||
|
||||
// this triggers an error in the CommitOffset request,
|
||||
// which leads to the abort call
|
||||
coordinator.Close()
|
||||
|
||||
// Response to refresh coordinator request
|
||||
newCoordinator := NewMockBroker(t, 3)
|
||||
broker.Returns(&ConsumerMetadataResponse{
|
||||
CoordinatorID: newCoordinator.BrokerID(),
|
||||
CoordinatorHost: "127.0.0.1",
|
||||
CoordinatorPort: newCoordinator.Port(),
|
||||
})
|
||||
|
||||
ocResponse := new(OffsetCommitResponse)
|
||||
ocResponse.AddError("my_topic", 0, ErrNoError)
|
||||
newCoordinator.Returns(ocResponse)
|
||||
|
||||
pom.MarkOffset(100, "modified_meta")
|
||||
|
||||
safeClose(t, pom)
|
||||
safeClose(t, om)
|
||||
broker.Close()
|
||||
safeClose(t, testClient)
|
||||
}
|
132
vendor/src/github.com/Shopify/sarama/offset_request.go
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
package sarama
|
||||
|
||||
type offsetRequestBlock struct {
|
||||
time int64
|
||||
maxOffsets int32 // Only used in version 0
|
||||
}
|
||||
|
||||
func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt64(int64(b.time))
|
||||
if version == 0 {
|
||||
pe.putInt32(b.maxOffsets)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
if b.time, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if version == 0 {
|
||||
if b.maxOffsets, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetRequest struct {
|
||||
Version int16
|
||||
blocks map[string]map[int32]*offsetRequestBlock
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) encode(pe packetEncoder) error {
|
||||
pe.putInt32(-1) // replica ID is always -1 for clients
|
||||
err := pe.putArrayLength(len(r.blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.blocks {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err = block.encode(pe, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
|
||||
r.Version = version
|
||||
|
||||
// Ignore replica ID
|
||||
if _, err := pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
blockCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockCount == 0 {
|
||||
return nil
|
||||
}
|
||||
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
|
||||
for i := 0; i < blockCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &offsetRequestBlock{}
|
||||
if err := block.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = block
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) key() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_10_1_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
|
||||
}
|
||||
|
||||
tmp := new(offsetRequestBlock)
|
||||
tmp.time = time
|
||||
if r.Version == 0 {
|
||||
tmp.maxOffsets = maxOffsets
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = tmp
|
||||
}
|
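The request type above is typically populated through AddBlock, exactly as the tests in the next file do. A short sketch (the topic, partition and time values are placeholders; only names defined above are assumed):

package main

import "github.com/Shopify/sarama"

func buildOffsetRequests() (*sarama.OffsetRequest, *sarama.OffsetRequest) {
	// Version 0 request: AddBlock(topic, partition, time, maxOffsets);
	// the maxOffsets argument is encoded only for this version.
	r0 := &sarama.OffsetRequest{}
	r0.AddBlock("foo", 4, 1, 2)

	// Version 1 request (requires Kafka 0.10.1.0 per requiredVersion above);
	// the final maxOffsets argument is accepted but ignored.
	r1 := &sarama.OffsetRequest{Version: 1}
	r1.AddBlock("foo", 4, 1, 2)

	return r0, r1
}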
43
vendor/src/github.com/Shopify/sarama/offset_request_test.go
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
offsetRequestNoBlocks = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
offsetRequestOneBlock = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x03, 'f', 'o', 'o',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02}
|
||||
|
||||
offsetRequestOneBlockV1 = []byte{
|
||||
0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x03, 'b', 'a', 'r',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x04,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
|
||||
)
|
||||
|
||||
func TestOffsetRequest(t *testing.T) {
|
||||
request := new(OffsetRequest)
|
||||
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
|
||||
|
||||
request.AddBlock("foo", 4, 1, 2)
|
||||
testRequest(t, "one block", request, offsetRequestOneBlock)
|
||||
}
|
||||
|
||||
func TestOffsetRequestV1(t *testing.T) {
|
||||
request := new(OffsetRequest)
|
||||
request.Version = 1
|
||||
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
|
||||
|
||||
request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1
|
||||
testRequest(t, "one block", request, offsetRequestOneBlockV1)
|
||||
}
|
174
vendor/src/github.com/Shopify/sarama/offset_response.go
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
package sarama
|
||||
|
||||
type OffsetResponseBlock struct {
|
||||
Err KError
|
||||
Offsets []int64 // Version 0
|
||||
Offset int64 // Version 1
|
||||
Timestamp int64 // Version 1
|
||||
}
|
||||
|
||||
func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
tmp, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.Err = KError(tmp)
|
||||
|
||||
if version == 0 {
|
||||
b.Offsets, err = pd.getInt64Array()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
b.Timestamp, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.Offset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For backwards compatibility put the offset in the offsets array too
|
||||
b.Offsets = []int64{b.Offset}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
|
||||
pe.putInt16(int16(b.Err))
|
||||
|
||||
if version == 0 {
|
||||
return pe.putInt64Array(b.Offsets)
|
||||
}
|
||||
|
||||
pe.putInt64(b.Timestamp)
|
||||
pe.putInt64(b.Offset)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type OffsetResponse struct {
|
||||
Version int16
|
||||
Blocks map[string]map[int32]*OffsetResponseBlock
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numBlocks, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
|
||||
|
||||
for j := 0; j < numBlocks; j++ {
|
||||
id, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := new(OffsetResponseBlock)
|
||||
err = block.decode(pd, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Blocks[name][id] = block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
|
||||
if r.Blocks == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Blocks[topic] == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.Blocks[topic][partition]
|
||||
}
|
||||
|
||||
/*
|
||||
// [0 0 0 1 ntopics
|
||||
0 8 109 121 95 116 111 112 105 99 topic
|
||||
0 0 0 1 npartitions
|
||||
0 0 0 0 id
|
||||
0 0
|
||||
|
||||
0 0 0 1 0 0 0 0
|
||||
0 1 1 1 0 0 0 1
|
||||
0 8 109 121 95 116 111 112
|
||||
105 99 0 0 0 1 0 0
|
||||
0 0 0 0 0 0 0 1
|
||||
0 0 0 0 0 1 1 1] <nil>
|
||||
|
||||
*/
|
||||
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
|
||||
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, partitions := range r.Blocks {
|
||||
if err = pe.putString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = pe.putArrayLength(len(partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err = block.encode(pe, r.version()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) key() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *OffsetResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V0_10_1_0
|
||||
default:
|
||||
return minVersion
|
||||
}
|
||||
}
|
||||
|
||||
// testing API
|
||||
|
||||
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
|
||||
if r.Blocks == nil {
|
||||
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
|
||||
}
|
||||
byTopic, ok := r.Blocks[topic]
|
||||
if !ok {
|
||||
byTopic = make(map[int32]*OffsetResponseBlock)
|
||||
r.Blocks[topic] = byTopic
|
||||
}
|
||||
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
|
||||
}
|
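Reading a decoded response of this type follows directly from the struct above: check Err, then use Offsets for version 0 or Offset/Timestamp for version 1. A minimal sketch (the function name and topic/partition are placeholders):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// inspectOffsetResponse is an illustrative sketch of walking an
// already-decoded OffsetResponse; it is not part of the vendored file.
func inspectOffsetResponse(res *sarama.OffsetResponse) {
	block := res.GetBlock("my_topic", 0)
	if block == nil {
		fmt.Println("no block for my_topic/0")
		return
	}
	if block.Err != sarama.ErrNoError {
		fmt.Println("broker returned error:", block.Err)
		return
	}
	if res.Version == 0 {
		// Version 0 carries a list of offsets.
		fmt.Println("offsets:", block.Offsets)
	} else {
		// Version 1 carries a single offset and timestamp; decode() also
		// mirrors the offset into Offsets for backwards compatibility.
		fmt.Println("offset:", block.Offset, "timestamp:", block.Timestamp)
	}
}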
111
vendor/src/github.com/Shopify/sarama/offset_response_test.go
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
package sarama
|
||||
|
||||
import "testing"
|
||||
|
||||
var (
|
||||
emptyOffsetResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x00}
|
||||
|
||||
normalOffsetResponse = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x01, 'a',
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x01, 'z',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
|
||||
|
||||
normalOffsetResponseV1 = []byte{
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
|
||||
0x00, 0x01, 'a',
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
0x00, 0x01, 'z',
|
||||
0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x02,
|
||||
0x00, 0x00,
|
||||
0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
|
||||
)
|
||||
|
||||
func TestEmptyOffsetResponse(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0)
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
|
||||
}
|
||||
|
||||
response = OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1)
|
||||
if len(response.Blocks) != 0 {
|
||||
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalOffsetResponse(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0)
|
||||
|
||||
if len(response.Blocks) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["a"]) != 0 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"]) != 1 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Err != ErrNoError {
|
||||
t.Fatal("Decoding produced invalid error for topic z partition 2.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"][2].Offsets) != 2 {
|
||||
t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
|
||||
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalOffsetResponseV1(t *testing.T) {
|
||||
response := OffsetResponse{}
|
||||
|
||||
testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1)
|
||||
|
||||
if len(response.Blocks) != 2 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["a"]) != 0 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
|
||||
}
|
||||
|
||||
if len(response.Blocks["z"]) != 1 {
|
||||
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Err != ErrNoError {
|
||||
t.Fatal("Decoding produced invalid error for topic z partition 2.")
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Timestamp != 1477920049286 {
|
||||
t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp)
|
||||
}
|
||||
|
||||
if response.Blocks["z"][2].Offset != 6 {
|
||||
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
|
||||
}
|
||||
}
|
45
vendor/src/github.com/Shopify/sarama/packet_decoder.go
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package sarama
|
||||
|
||||
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
|
||||
// Types implementing Decoder only need to worry about calling methods like GetString,
|
||||
// not about how a string is represented in Kafka.
|
||||
type packetDecoder interface {
|
||||
// Primitives
|
||||
getInt8() (int8, error)
|
||||
getInt16() (int16, error)
|
||||
getInt32() (int32, error)
|
||||
getInt64() (int64, error)
|
||||
getArrayLength() (int, error)
|
||||
|
||||
// Collections
|
||||
getBytes() ([]byte, error)
|
||||
getString() (string, error)
|
||||
getInt32Array() ([]int32, error)
|
||||
getInt64Array() ([]int64, error)
|
||||
getStringArray() ([]string, error)
|
||||
|
||||
// Subsets
|
||||
remaining() int
|
||||
getSubset(length int) (packetDecoder, error)
|
||||
|
||||
// Stacks, see PushDecoder
|
||||
push(in pushDecoder) error
|
||||
pop() error
|
||||
}
|
||||
|
||||
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
|
||||
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
|
||||
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
|
||||
// depend upon have been decoded.
|
||||
type pushDecoder interface {
|
||||
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
|
||||
saveOffset(in int)
|
||||
|
||||
// Returns the length of data to reserve for the input of this decoder (eg 4 bytes for a CRC32).
|
||||
reserveLength() int
|
||||
|
||||
// Indicates that all required data is now available to calculate and check the field.
|
||||
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
|
||||
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
|
||||
check(curOffset int, buf []byte) error
|
||||
}
|
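To make the push/pop contract concrete, here is a hypothetical pushDecoder, written as it would look inside this package, that validates a 4-byte big-endian length prefix against the bytes decoded after it. The type is invented purely for illustration; the comments above name CRCs and lengths as the real use cases.

package sarama

import (
	"encoding/binary"
	"fmt"
)

// lengthCheck32 is an illustrative pushDecoder: it reserves 4 bytes for a
// big-endian length prefix and, once the following bytes have been decoded,
// verifies the prefix matches how many bytes were actually consumed.
type lengthCheck32 struct {
	startOffset int
}

func (l *lengthCheck32) saveOffset(in int) { l.startOffset = in }

func (l *lengthCheck32) reserveLength() int { return 4 }

func (l *lengthCheck32) check(curOffset int, buf []byte) error {
	claimed := int(binary.BigEndian.Uint32(buf[l.startOffset:]))
	actual := curOffset - l.startOffset - 4
	if claimed != actual {
		return fmt.Errorf("length prefix claims %d bytes, decoded %d", claimed, actual)
	}
	return nil
}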
50
vendor/src/github.com/Shopify/sarama/packet_encoder.go
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
package sarama
|
||||
|
||||
import "github.com/rcrowley/go-metrics"
|
||||
|
||||
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
|
||||
// Types implementing Encoder only need to worry about calling methods like PutString,
|
||||
// not about how a string is represented in Kafka.
|
||||
type packetEncoder interface {
|
||||
// Primitives
|
||||
putInt8(in int8)
|
||||
putInt16(in int16)
|
||||
putInt32(in int32)
|
||||
putInt64(in int64)
|
||||
putArrayLength(in int) error
|
||||
|
||||
// Collections
|
||||
putBytes(in []byte) error
|
||||
putRawBytes(in []byte) error
|
||||
putString(in string) error
|
||||
putStringArray(in []string) error
|
||||
putInt32Array(in []int32) error
|
||||
putInt64Array(in []int64) error
|
||||
|
||||
// Provide the current offset to record the batch size metric
|
||||
offset() int
|
||||
|
||||
// Stacks, see PushEncoder
|
||||
push(in pushEncoder)
|
||||
pop() error
|
||||
|
||||
// To record metrics when provided
|
||||
metricRegistry() metrics.Registry
|
||||
}
|
||||
|
||||
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
|
||||
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
|
||||
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
|
||||
// depend upon have been written.
|
||||
type pushEncoder interface {
|
||||
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
|
||||
saveOffset(in int)
|
||||
|
||||
// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
|
||||
reserveLength() int
|
||||
|
||||
// Indicates that all required data is now available to calculate and write the field.
|
||||
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
|
||||
// of data to the saved offset, based on the data between the saved offset and curOffset.
|
||||
run(curOffset int, buf []byte) error
|
||||
}
|
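The encoding side is symmetric: a pushEncoder reserves space, lets the payload be written, then back-fills the field in run(). A matching hypothetical sketch, again invented only to illustrate the contract above:

package sarama

import "encoding/binary"

// lengthWriter32 is an illustrative pushEncoder: it reserves 4 bytes for a
// big-endian length prefix and back-fills it once the payload after it has
// been written.
type lengthWriter32 struct {
	startOffset int
}

func (l *lengthWriter32) saveOffset(in int) { l.startOffset = in }

func (l *lengthWriter32) reserveLength() int { return 4 }

func (l *lengthWriter32) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}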
123
vendor/src/github.com/Shopify/sarama/partitioner.go
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
package sarama
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
|
||||
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
|
||||
// as simple default implementations.
|
||||
type Partitioner interface {
|
||||
// Partition takes a message and partition count and chooses a partition
|
||||
Partition(message *ProducerMessage, numPartitions int32) (int32, error)
|
||||
|
||||
// RequiresConsistency indicates to the user of the partitioner whether the
|
||||
// mapping of key->partition is consistent or not. Specifically, if a
|
||||
// partitioner requires consistency then it must be allowed to choose from all
|
||||
// partitions (even ones known to be unavailable), and its choice must be
|
||||
// respected by the caller. The obvious example is the HashPartitioner.
|
||||
RequiresConsistency() bool
|
||||
}
|
||||
|
||||
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
|
||||
type PartitionerConstructor func(topic string) Partitioner
|
||||
|
||||
type manualPartitioner struct{}
|
||||
|
||||
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
|
||||
// ProducerMessage's Partition field as the partition to produce to.
|
||||
func NewManualPartitioner(topic string) Partitioner {
|
||||
return new(manualPartitioner)
|
||||
}
|
||||
|
||||
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
return message.Partition, nil
|
||||
}
|
||||
|
||||
func (p *manualPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type randomPartitioner struct {
|
||||
generator *rand.Rand
|
||||
}
|
||||
|
||||
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
|
||||
func NewRandomPartitioner(topic string) Partitioner {
|
||||
p := new(randomPartitioner)
|
||||
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
return int32(p.generator.Intn(int(numPartitions))), nil
|
||||
}
|
||||
|
||||
func (p *randomPartitioner) RequiresConsistency() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type roundRobinPartitioner struct {
|
||||
partition int32
|
||||
}
|
||||
|
||||
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
|
||||
func NewRoundRobinPartitioner(topic string) Partitioner {
|
||||
return &roundRobinPartitioner{}
|
||||
}
|
||||
|
||||
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
if p.partition >= numPartitions {
|
||||
p.partition = 0
|
||||
}
|
||||
ret := p.partition
|
||||
p.partition++
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *roundRobinPartitioner) RequiresConsistency() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type hashPartitioner struct {
|
||||
random Partitioner
|
||||
hasher hash.Hash32
|
||||
}
|
||||
|
||||
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
|
||||
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
|
||||
// modulo the number of partitions. This ensures that messages with the same key always end up on the
|
||||
// same partition.
|
||||
func NewHashPartitioner(topic string) Partitioner {
|
||||
p := new(hashPartitioner)
|
||||
p.random = NewRandomPartitioner(topic)
|
||||
p.hasher = fnv.New32a()
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
if message.Key == nil {
|
||||
return p.random.Partition(message, numPartitions)
|
||||
}
|
||||
bytes, err := message.Key.Encode()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
p.hasher.Reset()
|
||||
_, err = p.hasher.Write(bytes)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
partition := int32(p.hasher.Sum32()) % numPartitions
|
||||
if partition < 0 {
|
||||
partition = -partition
|
||||
}
|
||||
return partition, nil
|
||||
}
|
||||
|
||||
func (p *hashPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
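Beyond the provided implementations, the Partitioner interface above can also be satisfied by application code; any function matching PartitionerConstructor can build one. A hypothetical sketch of a partitioner that always selects partition 0 (type and constructor names are invented for illustration):

package main

import "github.com/Shopify/sarama"

// constantPartitioner always chooses partition 0; it exists only to
// illustrate the interface above.
type constantPartitioner struct{}

// NewConstantPartitioner matches the PartitionerConstructor signature.
func NewConstantPartitioner(topic string) sarama.Partitioner {
	return constantPartitioner{}
}

func (constantPartitioner) Partition(message *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	return 0, nil
}

func (constantPartitioner) RequiresConsistency() bool {
	// Like the manual partitioner above, the fixed choice must be respected
	// even if that partition is currently unavailable.
	return true
}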
Some files were not shown because too many files have changed in this diff.