docstring (string, 52-499 chars) | function (string, 67-35.2k chars) | __index_level_0__ (int64, 52.6k-1.16M) |
---|---|---|
Sets cookie(s) as provided by the query string and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: query
name: freeform
explode: true
allowEmptyValue: true
schema:
type: object
additionalProperties:
type: string
style: form
produces:
- text/plain
responses:
200:
description: Redirect to cookie list | def set_cookies():
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for("view_cookies")))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie())
return r | 114,862 |
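A minimal client-side sketch of exercising this handler, assuming the usual httpbin route (`/cookies/set`) and a reachable public deployment; `requests` follows the redirect to the cookie list automatically.

```python
# Sketch only: assumes a reachable httpbin deployment at BASE_URL and the
# standard /cookies/set route that maps to set_cookies().
import requests

BASE_URL = "https://httpbin.org"

with requests.Session() as session:
    # Each query parameter becomes a cookie; the handler then redirects to /cookies.
    resp = session.get(f"{BASE_URL}/cookies/set", params={"flavor": "oatmeal"})
    print(resp.json())  # {'cookies': {'flavor': 'oatmeal'}}
```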
Deletes cookie(s) as provided by the query string and redirects to cookie list.
---
tags:
- Cookies
parameters:
- in: query
name: freeform
explode: true
allowEmptyValue: true
schema:
type: object
additionalProperties:
type: string
style: form
produces:
- text/plain
responses:
200:
description: Redirect to cookie list | def delete_cookies():
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for("view_cookies")))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r | 114,863 |
Prompts the user for authorization using HTTP Basic Auth.
---
tags:
- Auth
parameters:
- in: path
name: user
type: string
- in: path
name: passwd
type: string
produces:
- application/json
responses:
200:
description: Successful authentication.
401:
description: Unsuccessful authentication. | def basic_auth(user="user", passwd="passwd"):
if not check_basic_auth(user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user) | 114,864 |
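A hedged usage sketch, assuming the standard `/basic-auth/{user}/{passwd}` route that this view handles, using the `requests` library's built-in HTTP Basic Auth support.

```python
# Sketch only: correct and incorrect credentials against the assumed route.
import requests

ok = requests.get("https://httpbin.org/basic-auth/user/passwd", auth=("user", "passwd"))
print(ok.status_code, ok.json())   # 200 {'authenticated': True, 'user': 'user'}

bad = requests.get("https://httpbin.org/basic-auth/user/passwd", auth=("user", "wrong"))
print(bad.status_code)             # 401
```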
Prompts the user for authorization using HTTP Basic Auth.
---
tags:
- Auth
parameters:
- in: path
name: user
type: string
- in: path
name: passwd
type: string
produces:
- application/json
responses:
200:
description: Successful authentication.
404:
description: Unsuccessful authentication. | def hidden_basic_auth(user="user", passwd="passwd"):
if not check_basic_auth(user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user) | 114,865 |
Prompts the user for authorization using bearer authentication.
---
tags:
- Auth
parameters:
- in: header
name: Authorization
schema:
type: string
produces:
- application/json
responses:
200:
description: Successful authentication.
401:
description: Unsuccessful authentication. | def bearer_auth():
authorization = request.headers.get("Authorization")
if not (authorization and authorization.startswith("Bearer ")):
response = app.make_response("")
response.headers["WWW-Authenticate"] = "Bearer"
response.status_code = 401
return response
slice_start = len("Bearer ")
token = authorization[slice_start:]
return jsonify(authenticated=True, token=token) | 114,866 |
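A hedged sketch of calling this handler, assuming the standard `/bearer` route, showing the `Authorization: Bearer <token>` header it inspects.

```python
# Sketch only: assumes the /bearer route maps to bearer_auth().
import requests

resp = requests.get(
    "https://httpbin.org/bearer",
    headers={"Authorization": "Bearer my-token"},
)
print(resp.status_code, resp.json())  # 200 {'authenticated': True, 'token': 'my-token'}
```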
Returns a delayed response (max of 10 seconds).
---
tags:
- Dynamic data
parameters:
- in: path
name: delay
type: int
produces:
- application/json
responses:
200:
description: A delayed response. | def delay_response(delay):
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(
get_dict("url", "args", "form", "data", "origin", "headers", "files")
) | 114,869 |
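A small sketch of the delay behavior from the client side, assuming the usual `/delay/{delay}` route.

```python
# Sketch only: the server sleeps before responding, which shows up in elapsed time.
import requests

resp = requests.get("https://httpbin.org/delay/3")
print(resp.status_code)                     # 200
print(round(resp.elapsed.total_seconds()))  # ~3 (server sleep plus network latency)
```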
Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
---
tags:
- Response inspection
parameters:
- in: header
name: If-Modified-Since
- in: header
name: If-None-Match
produces:
- application/json
responses:
200:
description: Cached response
304:
description: Not modified | def cache():
is_conditional = request.headers.get("If-Modified-Since") or request.headers.get(
"If-None-Match"
)
if is_conditional is None:
response = view_get()
response.headers["Last-Modified"] = http_date()
response.headers["ETag"] = uuid.uuid4().hex
return response
else:
return status_code(304) | 114,871 |
Assumes the resource has the given etag and responds to If-None-Match and If-Match headers appropriately.
---
tags:
- Response inspection
parameters:
- in: header
name: If-None-Match
- in: header
name: If-Match
produces:
- application/json
responses:
200:
description: Normal response
412:
description: Precondition failed | def etag(etag):
if_none_match = parse_multi_value_header(request.headers.get("If-None-Match"))
if_match = parse_multi_value_header(request.headers.get("If-Match"))
if if_none_match:
if etag in if_none_match or "*" in if_none_match:
response = status_code(304)
response.headers["ETag"] = etag
return response
elif if_match:
if etag not in if_match and "*" not in if_match:
return status_code(412)
# Special cases don't apply, return normal response
response = view_get()
response.headers["ETag"] = etag
return response | 114,872 |
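A hedged sketch of conditional requests against this view, assuming the `/etag/{etag}` route shown in its signature.

```python
# Sketch only: 304 for a matching If-None-Match, 412 for a failed If-Match.
import requests

url = "https://httpbin.org/etag/abc123"
first = requests.get(url)
etag_value = first.headers["ETag"]                       # 'abc123'

second = requests.get(url, headers={"If-None-Match": etag_value})
print(second.status_code)                                # 304 - cached copy still valid

stale = requests.get(url, headers={"If-Match": "other"})
print(stale.status_code)                                 # 412 - precondition failed
```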
Returns n random bytes generated with given seed
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes. | def random_bytes(n):
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if "seed" in params:
random.seed(int(params["seed"]))
response = make_response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = "application/octet-stream"
return response | 114,873 |
Streams n random bytes generated with given seed, at given chunk size per packet.
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes. | def stream_random_bytes(n):
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if "seed" in params:
random.seed(int(params["seed"]))
if "chunk_size" in params:
chunk_size = max(1, int(params["chunk_size"]))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield (bytes(chunks))
chunks = bytearray()
if chunks:
yield (bytes(chunks))
headers = {"Content-Type": "application/octet-stream"}
return Response(generate_bytes(), headers=headers) | 114,874 |
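A hedged client-side sketch of consuming this streamed response, assuming the usual `/stream-bytes/{n}` route and the `seed` / `chunk_size` query parameters read above.

```python
# Sketch only: stream the response and count the bytes received chunk by chunk.
import requests

with requests.get(
    "https://httpbin.org/stream-bytes/4096",
    params={"seed": 1, "chunk_size": 1024},
    stream=True,
) as resp:
    total = sum(len(chunk) for chunk in resp.iter_content(chunk_size=None))
print(total)  # 4096
```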
Streams n bytes of deterministic data at the given chunk size, honoring HTTP Range requests for partial content.
---
tags:
- Dynamic data
parameters:
- in: path
name: numbytes
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes. | def range_request(numbytes):
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(
headers={"ETag": "range%d" % numbytes, "Accept-Ranges": "bytes"}
)
response.status_code = 404
response.data = "number of bytes must be in the range (0, 102400]"
return response
params = CaseInsensitiveDict(request.args.items())
if "chunk_size" in params:
chunk_size = max(1, int(params["chunk_size"]))
else:
chunk_size = 10 * 1024
duration = float(params.get("duration", 0))
pause_per_byte = duration / numbytes
request_headers = get_headers()
first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)
range_length = (last_byte_pos + 1) - first_byte_pos
if (
first_byte_pos > last_byte_pos
or first_byte_pos not in xrange(0, numbytes)
or last_byte_pos not in xrange(0, numbytes)
):
response = Response(
headers={
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Range": "bytes */%d" % numbytes,
"Content-Length": "0",
}
)
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord("a") + (i % 26))
if len(chunks) == chunk_size:
yield (bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield (bytes(chunks))
content_range = "bytes %d-%d/%d" % (first_byte_pos, last_byte_pos, numbytes)
response_headers = {
"Content-Type": "application/octet-stream",
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Length": str(range_length),
"Content-Range": content_range,
}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response | 114,875 |
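A hedged sketch of a partial-content request against this view, assuming the usual `/range/{numbytes}` route; the `Range` header selects a slice and the response carries the matching `Content-Range`.

```python
# Sketch only: request the first 128 of 1024 bytes.
import requests

resp = requests.get(
    "https://httpbin.org/range/1024",
    headers={"Range": "bytes=0-127"},
)
print(resp.status_code)               # 206 Partial Content
print(resp.headers["Content-Range"])  # bytes 0-127/1024
print(len(resp.content))              # 128
```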
Generate a page containing n links to other pages which do the same.
---
tags:
- Dynamic data
parameters:
- in: path
name: n
type: int
- in: path
name: offset
type: int
produces:
- text/html
responses:
200:
description: HTML links. | def link_page(n, offset):
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ["<html><head><title>Links</title></head><body>"]
for i in xrange(n):
if i == offset:
html.append("{0} ".format(i))
else:
html.append(link.format(url_for("link_page", n=n, offset=i), i))
html.append("</body></html>")
return "".join(html) | 114,876 |
Add a metric to the metric family.
Args:
labels: A list of label values
value: The value of the metric
created: Optional unix timestamp the child was created at. | def add_metric(self, labels, value, created=None, timestamp=None):
self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))
if created is not None:
self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp)) | 115,005 |
Add a metric to the metric family.
Args:
labels: A list of label values
count_value: The count value of the metric.
sum_value: The sum value of the metric. | def add_metric(self, labels, count_value, sum_value, timestamp=None):
self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp))
self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp)) | 115,007 |
Add a metric to the metric family.
Args:
labels: A list of label values
buckets: A list of lists.
Each inner list can be a pair of bucket name and value,
or a triple of bucket name, value, and exemplar.
The buckets must be sorted, and +Inf present.
sum_value: The sum value of the metric. | def add_metric(self, labels, buckets, sum_value, timestamp=None):
for b in buckets:
bucket, value = b[:2]
exemplar = None
if len(b) == 3:
exemplar = b[2]
self.samples.append(Sample(
self.name + '_bucket',
dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
value,
timestamp,
exemplar,
))
# +Inf is last and provides the count value.
self.samples.extend([
Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp),
]) | 115,008 |
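A hedged usage sketch of this histogram `add_metric` signature, using `prometheus_client`'s `HistogramMetricFamily` as an assumed stand-in for the class these methods belong to.

```python
# Sketch only: buckets are (upper-bound, cumulative-count) pairs, sorted,
# ending with +Inf whose value doubles as the total count.
from prometheus_client.core import HistogramMetricFamily

h = HistogramMetricFamily("request_latency_seconds", "Request latency", labels=["method"])
h.add_metric(
    labels=["GET"],
    buckets=[("0.1", 4.0), ("0.5", 9.0), ("+Inf", 10.0)],
    sum_value=2.7,
)
for sample in h.samples:
    print(sample.name, sample.labels, sample.value)
```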
Add a metric to the metric family.
Args:
labels: A list of label values
buckets: A list of pairs of bucket names and values.
The buckets must be sorted, and +Inf present.
gsum_value: The sum value of the metric. | def add_metric(self, labels, buckets, gsum_value, timestamp=None):
for bucket, value in buckets:
self.samples.append(Sample(
self.name + '_bucket',
dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
value, timestamp))
# +Inf is last and provides the count value.
self.samples.extend([
Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp),
]) | 115,009 |
Add a metric to the metric family.
Args:
labels: A list of label values
value: A dict of labels | def add_metric(self, labels, value, timestamp=None):
self.samples.append(Sample(
self.name + '_info',
dict(dict(zip(self._labelnames, labels)), **value),
1,
timestamp,
)) | 115,010 |
Add a metric to the metric family.
Args:
labels: A list of label values
value: A dict of string state names to booleans | def add_metric(self, labels, value, timestamp=None):
labels = tuple(labels)
for state, enabled in sorted(value.items()):
v = (1 if enabled else 0)
self.samples.append(Sample(
self.name,
dict(zip(self._labelnames + (self.name,), labels + (state,))),
v,
timestamp,
)) | 115,011 |
Create a TREC dataset instance given a path and fields.
Arguments:
path: Path to the data file.
text_field: The field that will be used for text data.
label_field: The field that will be used for label data.
fine_grained: Whether to use the fine-grained (50-class) version of TREC
or the coarse-grained (6-class) version.
Remaining keyword arguments: Passed to the constructor of
data.Dataset. | def __init__(self, path, text_field, label_field,
fine_grained=False, **kwargs):
fields = [('text', text_field), ('label', label_field)]
examples = []
def get_label_str(label):
return label.split(':')[0] if not fine_grained else label
label_field.preprocessing = data.Pipeline(get_label_str)
for line in open(os.path.expanduser(path), 'rb'):
# there is one non-ASCII byte: sisterBADBYTEcity; replaced with space
label, _, text = line.replace(b'\xf0', b' ').decode().partition(' ')
examples.append(data.Example.fromlist([text, label], fields))
super(TREC, self).__init__(examples, fields, **kwargs) | 115,686 |
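A hedged usage sketch, assuming the classic torchtext API these dataset snippets appear to come from (newer releases ship the same classes under `torchtext.legacy`).

```python
# Sketch only: splits() downloads TREC if needed and builds train/test via
# the __init__ shown above.
from torchtext import data, datasets

TEXT = data.Field(lower=True)
LABEL = data.Field(sequential=False)

train, test = datasets.TREC.splits(TEXT, LABEL, fine_grained=False)

TEXT.build_vocab(train)
LABEL.build_vocab(train)
print(len(train), len(test), len(LABEL.vocab))
```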
Create a TranslationDataset given paths and fields.
Arguments:
path: Common prefix of paths to the data files for both languages.
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
Remaining keyword arguments: Passed to the constructor of
data.Dataset. | def __init__(self, path, exts, fields, **kwargs):
if not isinstance(fields[0], (tuple, list)):
fields = [('src', fields[0]), ('trg', fields[1])]
src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)
examples = []
with io.open(src_path, mode='r', encoding='utf-8') as src_file, \
io.open(trg_path, mode='r', encoding='utf-8') as trg_file:
for src_line, trg_line in zip(src_file, trg_file):
src_line, trg_line = src_line.strip(), trg_line.strip()
if src_line != '' and trg_line != '':
examples.append(data.Example.fromlist(
[src_line, trg_line], fields))
super(TranslationDataset, self).__init__(examples, fields, **kwargs) | 115,721 |
Process a list of examples to create a batch.
Postprocess the batch with user-provided Pipeline.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
object: Processed object given the input and custom
postprocessing Pipeline. | def process(self, batch, *args, **kwargs):
if self.postprocessing is not None:
batch = self.postprocessing(batch)
return batch | 115,727 |
Process a list of examples to create a torch.Tensor.
Pad, numericalize, and postprocess a batch and create a tensor.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
torch.autograd.Variable: Processed object given the input
and custom postprocessing Pipeline. | def process(self, batch, device=None):
padded = self.pad(batch)
tensor = self.numericalize(padded, device=device)
return tensor | 115,733 |
Segment one or more datasets with this subword field.
Arguments:
Positional arguments: Dataset objects or other indexable
mutable sequences to segment. If a Dataset object is provided,
all columns corresponding to this field are used; individual
columns can also be provided directly. | def segment(self, *args):
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in tqdm(data, 'segmenting'):
x[:] = self.vocab.segment(x) | 115,740 |
Preprocess a single example.
First, tokenization and the supplied preprocessing pipeline are applied. Since
this field is always sequential, the result is a list. Then, each element of
the list is preprocessed using ``self.nesting_field.preprocess`` and the resulting
list is returned.
Arguments:
xs (list or str): The input to preprocess.
Returns:
list: The preprocessed list. | def preprocess(self, xs):
return [self.nesting_field.preprocess(x)
for x in super(NestedField, self).preprocess(xs)] | 115,742 |
Create Iterator objects for multiple splits of a dataset.
Arguments:
datasets: Tuple of Dataset objects corresponding to the splits. The
first such object should be the train set.
batch_sizes: Tuple of batch sizes to use for the different splits,
or None to use the same batch_size for all splits.
Remaining keyword arguments: Passed to the constructor of the
iterator class being used. | def splits(cls, datasets, batch_sizes=None, **kwargs):
if batch_sizes is None:
batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
ret = []
for i in range(len(datasets)):
train = i == 0
ret.append(cls(
datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))
return tuple(ret) | 115,750 |
Create an IMDB dataset instance given a path and fields.
Arguments:
path: Path to the dataset's highest level directory
text_field: The field that will be used for text data.
label_field: The field that will be used for label data.
Remaining keyword arguments: Passed to the constructor of
data.Dataset. | def __init__(self, path, text_field, label_field, **kwargs):
fields = [('text', text_field), ('label', label_field)]
examples = []
for label in ['pos', 'neg']:
for fname in glob.iglob(os.path.join(path, label, '*.txt')):
with io.open(fname, 'r', encoding="utf-8") as f:
text = f.readline()
examples.append(data.Example.fromlist([text, label], fields))
super(IMDB, self).__init__(examples, fields, **kwargs) | 115,771 |
Create a LanguageModelingDataset given a path and a field.
Arguments:
path: Path to the data file.
text_field: The field that will be used for text data.
newline_eos: Whether to add an <eos> token for every newline in the
data file. Default: True.
Remaining keyword arguments: Passed to the constructor of
data.Dataset. | def __init__(self, path, text_field, newline_eos=True,
encoding='utf-8', **kwargs):
fields = [('text', text_field)]
text = []
with io.open(path, encoding=encoding) as f:
for line in f:
text += text_field.preprocess(line)
if newline_eos:
text.append(u'<eos>')
examples = [data.Example.fromlist([text], fields)]
super(LanguageModelingDataset, self).__init__(
examples, fields, **kwargs) | 115,778 |
Create a pipeline.
Arguments:
convert_token: The function to apply to input sequence data.
If None, the identity function is used. Default: None | def __init__(self, convert_token=None):
if convert_token is None:
self.convert_token = Pipeline.identity
elif callable(convert_token):
self.convert_token = convert_token
else:
raise ValueError("Pipeline input convert_token {} is not None "
"or callable".format(convert_token))
self.pipes = [self] | 115,781 |
Apply the current Pipeline(s) to an input.
Arguments:
x: The input to process with the Pipeline(s).
Positional arguments: Forwarded to the `call` function
of the Pipeline(s). | def __call__(self, x, *args):
for pipe in self.pipes:
x = pipe.call(x, *args)
return x | 115,782 |
Apply _only_ the convert_token function of the current pipeline
to the input. If the input is a list, a list with the results of
applying the `convert_token` function to all input elements is
returned.
Arguments:
x: The input to apply the convert_token function to.
Positional arguments: Forwarded to the `convert_token` function
of the current Pipeline. | def call(self, x, *args):
if isinstance(x, list):
return [self.convert_token(tok, *args) for tok in x]
return self.convert_token(x, *args) | 115,783 |
Add a Pipeline to be applied before this processing pipeline.
Arguments:
pipeline: The Pipeline or callable to apply before this
Pipeline. | def add_before(self, pipeline):
if not isinstance(pipeline, Pipeline):
pipeline = Pipeline(pipeline)
self.pipes = pipeline.pipes[:] + self.pipes[:]
return self | 115,784 |
Add a Pipeline to be applied after this processing pipeline.
Arguments:
pipeline: The Pipeline or callable to apply after this
Pipeline. | def add_after(self, pipeline):
if not isinstance(pipeline, Pipeline):
pipeline = Pipeline(pipeline)
self.pipes = self.pipes[:] + pipeline.pipes[:]
return self | 115,785 |
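A small sketch of chaining the Pipeline pieces shown in the preceding rows (`__init__`, `__call__`, `call`, `add_after`); it assumes that `Pipeline` class is in scope.

```python
# Sketch only: a two-step token cleaner built by chaining Pipelines.
lower = Pipeline(str.lower)
clean = lower.add_after(Pipeline(str.strip))  # apply lower, then strip

print(clean("  HeLLo  "))           # 'hello'
print(clean(["  Foo ", " BAR"]))    # ['foo', 'bar'] (lists are mapped element-wise)
```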
Create a dataset from a list of Examples and Fields.
Arguments:
examples: List of Examples.
fields (List(tuple(str, Field))): The Fields to use in this tuple. The
string is a field name, and the Field is the associated field.
filter_pred (callable or None): Use only examples for which
filter_pred(example) is True, or use all examples if None.
Default is None. | def __init__(self, examples, fields, filter_pred=None):
if filter_pred is not None:
make_list = isinstance(examples, list)
examples = filter(filter_pred, examples)
if make_list:
examples = list(examples)
self.examples = examples
self.fields = dict(fields)
# Unpack field tuples
for n, f in list(self.fields.items()):
if isinstance(n, tuple):
self.fields.update(zip(n, f))
del self.fields[n] | 115,789 |
Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset. | def download(cls, root, check=None):
path = os.path.join(root, cls.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in cls.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print('downloading {}'.format(filename))
download_from_url(url, zpath)
zroot, ext = os.path.splitext(zpath)
_, ext_inner = os.path.splitext(zroot)
if ext == '.zip':
with zipfile.ZipFile(zpath, 'r') as zfile:
print('extracting')
zfile.extractall(path)
# tarfile cannot handle bare .gz files
elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar':
with tarfile.open(zpath, 'r:gz') as tar:
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
elif ext == '.gz':
with gzip.open(zpath, 'rb') as gz:
with open(zroot, 'wb') as uncompressed:
shutil.copyfileobj(gz, uncompressed)
return os.path.join(path, cls.dirname) | 115,792 |
Remove unknown words from dataset examples with respect to given field.
Arguments:
field_names (list(str)): Within each example, only the fields whose names appear in
field_names will have their unknown words removed. | def filter_examples(self, field_names):
for i, example in enumerate(self.examples):
for field_name in field_names:
vocab = set(self.fields[field_name].vocab.stoi)
text = getattr(example, field_name)
example_part = [word for word in text if word in vocab]
setattr(example, field_name, example_part)
self.examples[i] = example | 115,793 |
CIFAR-10 dataset and TF model constructor.
Args:
batch_size: dataset batch size. | def __init__(self, batch_size=8, data_dir=None):
self._train_data, self._train_labels = None, None
self._test_data, self._test_labels = None, None
self._batch_size = batch_size
self.img_size = IMAGE_SIZE
self.num_channels = NUM_CHANNELS
self.num_classes = NUM_CLASSES
self.train_len = NUM_TRAIN_SAMPLES
self.test_len = NUM_TEST_SAMPLES
self.data_dir = data_dir or "./test_data"
self.cifar10_dir = os.path.join(self.data_dir, 'cifar-10-batches-py')
self.cifar10_tarball = os.path.join(self.data_dir, CIFAR10_URL.split('/')[-1])
self.maybe_download_and_extract() | 115,999 |
Load the data in memory.
Args:
dataset: string in ['train', 'test'] | def _load(self, dataset='train'):
data, labels = None, None
if dataset == 'train':
files = [os.path.join(self.cifar10_dir, 'data_batch_%d' % i) for i in range(1, 6)]
else:
files = [os.path.join(self.cifar10_dir, 'test_batch')]
for file in files:
if not os.path.exists(file):
raise FileNotFoundError('Failed to find file: ' + file)
# Load the data from the batch files
for file in files:
with open(file, 'rb') as f:
cifar10 = pickle.load(f, encoding='latin1')
if labels is None:
labels = np.array(cifar10['labels'])
else:
labels = np.concatenate((labels, cifar10['labels']), axis=0)
if data is None:
data = cifar10['data']
else:
data = np.concatenate((data, cifar10['data']), axis=0)
# Adapt the format of the data to our convnet
data = np.array(data, dtype=float) / 255.0
data = data.reshape([-1, self.num_channels, self.img_size, self.img_size])
data = data.transpose([0, 2, 3, 1])
# One-hot encode labels (see https://stackoverflow.com/a/42874726)
labels = np.eye(self.num_classes)[np.array(labels).reshape(-1)]
if dataset == 'train':
self._train_data, self._train_labels = data, labels
else:
self._test_data, self._test_labels = data, labels | 116,000 |
Build a simple convnet (BN before ReLU).
Args:
inputs: a tensor of size [batch_size, height, width, channels]
mode: string in ['train', 'test']
Returns:
the last op containing the predictions
Note:
Best score
Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656
Worst score
Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874 | def model(self, inputs, mode='train'):
# Extract features
training = (mode == 'train')
with tf.variable_scope('conv1') as scope:
conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv2') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
with tf.variable_scope('conv3') as scope:
conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME')
bn = tf.layers.batch_normalization(inputs=conv, training=training)
bn = tf.nn.relu(bn)
pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name)
# Classify
with tf.variable_scope('fc') as scope:
flat = tf.layers.flatten(pool)
fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu)
softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax)
return softmax | 116,001 |
Lookup and set offsets for any partitions which are awaiting an
explicit reset.
Arguments:
partitions (set of TopicPartitions): the partitions to reset | def reset_offsets_if_needed(self, partitions):
for tp in partitions:
# TODO: If there are several offsets to reset, we could submit offset requests in parallel
if self._subscriptions.is_assigned(tp) and self._subscriptions.is_offset_reset_needed(tp):
self._reset_offset(tp) | 116,018 |
Update the fetch positions for the provided partitions.
Arguments:
partitions (list of TopicPartitions): partitions to update
Raises:
NoOffsetForPartitionError: if no offset is stored for a given
partition and no reset policy is available | def update_fetch_positions(self, partitions):
# reset the fetch position to the committed position
for tp in partitions:
if not self._subscriptions.is_assigned(tp):
log.warning("partition %s is not assigned - skipping offset"
" update", tp)
continue
elif self._subscriptions.is_fetchable(tp):
log.warning("partition %s is still fetchable -- skipping offset"
" update", tp)
continue
if self._subscriptions.is_offset_reset_needed(tp):
self._reset_offset(tp)
elif self._subscriptions.assignment[tp].committed is None:
# there's no committed position, so we need to reset with the
# default strategy
self._subscriptions.need_offset_reset(tp)
self._reset_offset(tp)
else:
committed = self._subscriptions.assignment[tp].committed
log.debug("Resetting offset for partition %s to the committed"
" offset %s", tp, committed)
self._subscriptions.seek(tp, committed) | 116,020 |
Reset offsets for the given partition using the offset reset strategy.
Arguments:
partition (TopicPartition): the partition that needs reset offset
Raises:
NoOffsetForPartitionError: if no offset reset strategy is defined | def _reset_offset(self, partition):
timestamp = self._subscriptions.assignment[partition].reset_strategy
if timestamp is OffsetResetStrategy.EARLIEST:
strategy = 'earliest'
elif timestamp is OffsetResetStrategy.LATEST:
strategy = 'latest'
else:
raise NoOffsetForPartitionError(partition)
log.debug("Resetting offset for partition %s to %s offset.",
partition, strategy)
offsets = self._retrieve_offsets({partition: timestamp})
if partition not in offsets:
raise NoOffsetForPartitionError(partition)
offset = offsets[partition][0]
# we might lose the assignment while fetching the offset,
# so check it is still active
if self._subscriptions.is_assigned(partition):
self._subscriptions.seek(partition, offset) | 116,025 |
Fetch offsets for each partition in timestamps dict. This may send
requests to multiple nodes, based on which node is leader for each partition.
Arguments:
timestamps (dict): {TopicPartition: int} mapping of fetching
timestamps.
Returns:
Future: resolves to a mapping of retrieved offsets | def _send_offset_requests(self, timestamps):
timestamps_by_node = collections.defaultdict(dict)
for partition, timestamp in six.iteritems(timestamps):
node_id = self._client.cluster.leader_for_partition(partition)
if node_id is None:
self._client.add_topic(partition.topic)
log.debug("Partition %s is unknown for fetching offset,"
" wait for metadata refresh", partition)
return Future().failure(Errors.StaleMetadata(partition))
elif node_id == -1:
log.debug("Leader for partition %s unavailable for fetching "
"offset, wait for metadata refresh", partition)
return Future().failure(
Errors.LeaderNotAvailableError(partition))
else:
timestamps_by_node[node_id][partition] = timestamp
# Aggregate results until we have all
list_offsets_future = Future()
responses = []
node_count = len(timestamps_by_node)
def on_success(value):
responses.append(value)
if len(responses) == node_count:
offsets = {}
for r in responses:
offsets.update(r)
list_offsets_future.success(offsets)
def on_fail(err):
if not list_offsets_future.is_done:
list_offsets_future.failure(err)
for node_id, timestamps in six.iteritems(timestamps_by_node):
_f = self._send_offset_request(node_id, timestamps)
_f.add_callback(on_success)
_f.add_errback(on_fail)
return list_offsets_future | 116,032 |
Callback for the response of the list offset call above.
Arguments:
future (Future): the future to update based on response
response (OffsetResponse): response from the server
Raises:
AssertionError: if response does not match partition | def _handle_offset_response(self, future, response):
timestamp_offset_map = {}
for topic, part_data in response.topics:
for partition_info in part_data:
partition, error_code = partition_info[:2]
partition = TopicPartition(topic, partition)
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
if response.API_VERSION == 0:
offsets = partition_info[2]
assert len(offsets) <= 1, 'Expected OffsetResponse with one offset'
if not offsets:
offset = UNKNOWN_OFFSET
else:
offset = offsets[0]
log.debug("Handling v0 ListOffsetResponse response for %s. "
"Fetched offset %s", partition, offset)
if offset != UNKNOWN_OFFSET:
timestamp_offset_map[partition] = (offset, None)
else:
timestamp, offset = partition_info[2:]
log.debug("Handling ListOffsetResponse response for %s. "
"Fetched offset %s, timestamp %s",
partition, offset, timestamp)
if offset != UNKNOWN_OFFSET:
timestamp_offset_map[partition] = (offset, timestamp)
elif error_type is Errors.UnsupportedForMessageFormatError:
# The message format on the broker side is before 0.10.0,
# we simply put None in the response.
log.debug("Cannot search by timestamp for partition %s because the"
" message format version is before 0.10.0", partition)
elif error_type is Errors.NotLeaderForPartitionError:
log.debug("Attempt to fetch offsets for partition %s failed due"
" to obsolete leadership information, retrying.",
partition)
future.failure(error_type(partition))
return
elif error_type is Errors.UnknownTopicOrPartitionError:
log.warning("Received unknown topic or partition error in ListOffset "
"request for partition %s. The topic/partition " +
"may not exist or the user may not have Describe access "
"to it.", partition)
future.failure(error_type(partition))
return
else:
log.warning("Attempt to fetch offsets for partition %s failed due to:"
" %s", partition, error_type)
future.failure(error_type(partition))
return
if not future.is_done:
future.success(timestamp_offset_map) | 116,034 |
Pure-python Murmur2 implementation.
Based on java client, see org.apache.kafka.common.utils.Utils.murmur2
Args:
data (bytes): opaque bytes
Returns: MurmurHash2 of data | def murmur2(data):
# Python2 bytes is really a str, causing the bitwise operations below to fail
# so convert to bytearray.
if six.PY2:
data = bytearray(bytes(data))
length = len(data)
seed = 0x9747b28c
# 'm' and 'r' are mixing constants generated offline.
# They're not really 'magic', they just happen to work well.
m = 0x5bd1e995
r = 24
# Initialize the hash to a random value
h = seed ^ length
length4 = length // 4
for i in range(length4):
i4 = i * 4
k = ((data[i4 + 0] & 0xff) +
((data[i4 + 1] & 0xff) << 8) +
((data[i4 + 2] & 0xff) << 16) +
((data[i4 + 3] & 0xff) << 24))
k &= 0xffffffff
k *= m
k &= 0xffffffff
k ^= (k % 0x100000000) >> r # k ^= k >>> r
k &= 0xffffffff
k *= m
k &= 0xffffffff
h *= m
h &= 0xffffffff
h ^= k
h &= 0xffffffff
# Handle the last few bytes of the input array
extra_bytes = length % 4
if extra_bytes >= 3:
h ^= (data[(length & ~3) + 2] & 0xff) << 16
h &= 0xffffffff
if extra_bytes >= 2:
h ^= (data[(length & ~3) + 1] & 0xff) << 8
h &= 0xffffffff
if extra_bytes >= 1:
h ^= (data[length & ~3] & 0xff)
h &= 0xffffffff
h *= m
h &= 0xffffffff
h ^= (h % 0x100000000) >> 13 # h >>> 13;
h &= 0xffffffff
h *= m
h &= 0xffffffff
h ^= (h % 0x100000000) >> 15 # h >>> 15;
h &= 0xffffffff
return h | 116,045 |
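A hedged sketch of how this hash is typically used for partitioning, following the convention of Kafka's default partitioner (mask off the sign bit, then take the result modulo the partition count); this is an illustration, not a claim about this module's own partitioner API.

```python
# Sketch only: map a message key to a partition index using murmur2() above.
key = b"user-42"
num_partitions = 12
partition = (murmur2(key) & 0x7fffffff) % num_partitions
print(partition)
```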
Perform leader synchronization and send back the assignment
for the group via SyncGroupRequest
Arguments:
response (JoinResponse): broker response to parse
Returns:
Future: resolves to member assignment encoded-bytes | def _on_join_leader(self, response):
try:
group_assignment = self._perform_assignment(response.leader_id,
response.group_protocol,
response.members)
except Exception as e:
return Future().failure(e)
version = 0 if self.config['api_version'] < (0, 11, 0) else 1
request = SyncGroupRequest[version](
self.group_id,
self._generation.generation_id,
self._generation.member_id,
[(member_id,
assignment if isinstance(assignment, bytes) else assignment.encode())
for member_id, assignment in six.iteritems(group_assignment)])
log.debug("Sending leader SyncGroup for group %s to coordinator %s: %s",
self.group_id, self.coordinator_id, request)
return self._send_sync_group_request(request) | 116,100 |
Create a metrics repository with a default config, given metric
reporters and the ability to expire eligible sensors
Arguments:
default_config (MetricConfig, optional): The default config
reporters (list of AbstractMetricsReporter, optional):
The metrics reporters
enable_expiration (bool, optional): true if the metrics instance
can garbage collect inactive sensors, false otherwise | def __init__(self, default_config=None, reporters=None,
enable_expiration=False):
self._lock = threading.RLock()
self._config = default_config or MetricConfig()
self._sensors = {}
self._metrics = {}
self._children_sensors = {}
self._reporters = reporters or []
for reporter in self._reporters:
reporter.init([])
if enable_expiration:
def expire_loop():
while True:
# delay 30 seconds
time.sleep(30)
self.ExpireSensorTask.run(self)
metrics_scheduler = threading.Thread(target=expire_loop)
# Creating a daemon thread to not block shutdown
metrics_scheduler.daemon = True
metrics_scheduler.start()
self.add_metric(self.metric_name('count', 'kafka-metrics-count',
'total number of registered metrics'),
AnonMeasurable(lambda config, now: len(self._metrics))) | 116,138 |
Remove a sensor (if it exists), associated metrics and its children.
Arguments:
name (str): The name of the sensor to be removed | def remove_sensor(self, name):
sensor = self._sensors.get(name)
if sensor:
child_sensors = None
with sensor._lock:
with self._lock:
val = self._sensors.pop(name, None)
if val and val == sensor:
for metric in sensor.metrics:
self.remove_metric(metric.metric_name)
logger.debug('Removed sensor with name %s', name)
child_sensors = self._children_sensors.pop(sensor, None)
if child_sensors:
for child_sensor in child_sensors:
self.remove_sensor(child_sensor.name) | 116,141 |
Add a metric to monitor an object that implements measurable.
This metric won't be associated with any sensor.
This is a way to expose existing values as metrics.
Arguments:
metric_name (MetricName): The name of the metric
measurable (AbstractMeasurable): The measurable that will be
measured by this metric
config (MetricConfig, optional): The configuration to use when
measuring this measurable | def add_metric(self, metric_name, measurable, config=None):
# NOTE there was a lock here, but I don't think it's needed
metric = KafkaMetric(metric_name, measurable, config or self.config)
self.register_metric(metric) | 116,142 |
Remove a metric if it exists and return it. Return None otherwise.
If a metric is removed, `metric_removal` will be invoked
for each reporter.
Arguments:
metric_name (MetricName): The name of the metric
Returns:
KafkaMetric: the removed `KafkaMetric` or None if no such
metric exists | def remove_metric(self, metric_name):
with self._lock:
metric = self._metrics.pop(metric_name, None)
if metric:
for reporter in self._reporters:
reporter.metric_removal(metric)
return metric | 116,143 |
Construct a Snappy Message containing multiple Messages
The given payloads will be encoded, compressed, and sent as a single atomic
message to Kafka.
Arguments:
payloads: list(bytes), a list of payload to send be sent to Kafka
key: bytes, a key used for partition routing (optional) | def create_snappy_message(payloads, key=None):
message_set = KafkaProtocol._encode_message_set(
[create_message(payload, pl_key) for payload, pl_key in payloads])
snapped = snappy_encode(message_set)
codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
return kafka.structs.Message(0, 0x00 | codec, key, snapped) | 116,161 |
Encode a ProduceRequest struct
Arguments:
payloads: list of ProduceRequestPayload
acks: How "acky" you want the request to be
1: written to disk by the leader
0: immediate response
-1: waits for all replicas to be in sync
timeout: Maximum time (in ms) the server will wait for replica acks.
This is _not_ a socket timeout
Returns: ProduceRequest | def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
if acks not in (1, 0, -1):
raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)
topics = []
for topic, topic_payloads in group_by_topic_and_partition(payloads).items():
topic_msgs = []
for partition, payload in topic_payloads.items():
partition_msgs = []
for msg in payload.messages:
m = kafka.protocol.message.Message(
msg.value, key=msg.key,
magic=msg.magic, attributes=msg.attributes
)
partition_msgs.append((0, m.encode()))
topic_msgs.append((partition, MessageSet.encode(partition_msgs, prepend_size=False)))
topics.append((topic, topic_msgs))
return kafka.protocol.produce.ProduceRequest[0](
required_acks=acks,
timeout=timeout,
topics=topics
) | 116,165 |
Decode ProduceResponse to ProduceResponsePayload
Arguments:
response: ProduceResponse
Return: list of ProduceResponsePayload | def decode_produce_response(cls, response):
return [
kafka.structs.ProduceResponsePayload(topic, partition, error, offset)
for topic, partitions in response.topics
for partition, error, offset in partitions
] | 116,166 |
Encodes a FetchRequest struct
Arguments:
payloads: list of FetchRequestPayload
max_wait_time (int, optional): ms to block waiting for min_bytes
data. Defaults to 100.
min_bytes (int, optional): minimum bytes required to return before
max_wait_time. Defaults to 4096.
Return: FetchRequest | def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096):
return kafka.protocol.fetch.FetchRequest[0](
replica_id=-1,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
topics=[(
topic,
[(
partition,
payload.offset,
payload.max_bytes)
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()]) | 116,167 |
Decode FetchResponse struct to FetchResponsePayloads
Arguments:
response: FetchResponse | def decode_fetch_response(cls, response):
return [
kafka.structs.FetchResponsePayload(
topic, partition, error, highwater_offset, [
offset_and_msg
for offset_and_msg in cls.decode_message_set(messages)])
for topic, partitions in response.topics
for partition, error, highwater_offset, messages in partitions
] | 116,168 |
Decode OffsetResponse into OffsetResponsePayloads
Arguments:
response: OffsetResponse
Returns: list of OffsetResponsePayloads | def decode_offset_response(cls, response):
return [
kafka.structs.OffsetResponsePayload(topic, partition, error, tuple(offsets))
for topic, partitions in response.topics
for partition, error, offsets in partitions
] | 116,171 |
Decode OffsetResponse_v2 into ListOffsetResponsePayloads
Arguments:
response: OffsetResponse_v2
Returns: list of ListOffsetResponsePayloads | def decode_list_offset_response(cls, response):
return [
kafka.structs.ListOffsetResponsePayload(topic, partition, error, timestamp, offset)
for topic, partitions in response.topics
for partition, error, timestamp, offset in partitions
] | 116,172 |
Encode a MetadataRequest
Arguments:
topics: list of strings | def encode_metadata_request(cls, topics=(), payloads=None):
if payloads is not None:
topics = payloads
return kafka.protocol.metadata.MetadataRequest[0](topics) | 116,173 |
Encode a ConsumerMetadataRequest
Arguments:
client_id: string
correlation_id: int
payloads: string (consumer group) | def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads):
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.CONSUMER_METADATA_KEY))
message.append(struct.pack('>h%ds' % len(payloads), len(payloads), payloads))
msg = b''.join(message)
return write_int_string(msg) | 116,174 |
Decode bytes to a kafka.structs.ConsumerMetadataResponse
Arguments:
data: bytes to decode | def decode_consumer_metadata_response(cls, data):
((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0)
(host, cur) = read_short_string(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
return kafka.structs.ConsumerMetadataResponse(error, nodeId, host, port) | 116,175 |
Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload | def encode_offset_commit_request(cls, group, payloads):
return kafka.protocol.commit.OffsetCommitRequest[0](
consumer_group=group,
topics=[(
topic,
[(
partition,
payload.offset,
payload.metadata)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) | 116,176 |
Decode OffsetCommitResponse to an OffsetCommitResponsePayload
Arguments:
response: OffsetCommitResponse | def decode_offset_commit_response(cls, response):
return [
kafka.structs.OffsetCommitResponsePayload(topic, partition, error)
for topic, partitions in response.topics
for partition, error in partitions
] | 116,177 |
Encode an OffsetFetchRequest struct. The request is encoded using
version 0 if from_kafka is false, indicating a request for Zookeeper
offsets. It is encoded using version 1 otherwise, indicating a request
for Kafka offsets.
Arguments:
group: string, the consumer group you are fetching offsets for
payloads: list of OffsetFetchRequestPayload
from_kafka: bool, default False, set True for Kafka-committed offsets | def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
version = 1 if from_kafka else 0
return kafka.protocol.commit.OffsetFetchRequest[version](
consumer_group=group,
topics=[(
topic,
list(topic_payloads.keys()))
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))]) | 116,178 |
Decode OffsetFetchResponse to OffsetFetchResponsePayloads
Arguments:
response: OffsetFetchResponse | def decode_offset_fetch_response(cls, response):
return [
kafka.structs.OffsetFetchResponsePayload(
topic, partition, offset, metadata, error
)
for topic, partitions in response.topics
for partition, offset, metadata, error in partitions
] | 116,179 |
Record a value at a known time.
Arguments:
value (double): The value we are recording
time_ms (int): A POSIX timestamp in milliseconds.
Default: The time when record() is evaluated (now)
Raises:
QuotaViolationException: if recording this value moves a
metric beyond its configured maximum or minimum bound | def record(self, value=1.0, time_ms=None):
if time_ms is None:
time_ms = time.time() * 1000
self._last_record_time = time_ms
with self._lock: # XXX high volume, might be performance issue
# increment all the stats
for stat in self._stats:
stat.record(self._config, value, time_ms)
self._check_quotas(time_ms)
for parent in self._parents:
parent.record(value, time_ms) | 116,190 |
Register a compound statistic with this sensor which
yields multiple measurable quantities (like a histogram)
Arguments:
stat (AbstractCompoundStat): The stat to register
config (MetricConfig): The configuration for this stat.
If None then the stat will use the default configuration
for this sensor. | def add_compound(self, compound_stat, config=None):
if not compound_stat:
raise ValueError('compound stat must be non-empty')
self._stats.append(compound_stat)
for named_measurable in compound_stat.stats():
metric = KafkaMetric(named_measurable.name, named_measurable.stat,
config or self._config)
self._registry.register_metric(metric)
self._metrics.append(metric) | 116,192 |
Register a metric with this sensor
Arguments:
metric_name (MetricName): The name of the metric
stat (AbstractMeasurableStat): The statistic to keep
config (MetricConfig): A special configuration for this metric.
If None use the sensor default configuration. | def add(self, metric_name, stat, config=None):
with self._lock:
metric = KafkaMetric(metric_name, stat, config or self._config)
self._registry.register_metric(metric)
self._metrics.append(metric)
self._stats.append(stat) | 116,193 |
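A hedged sketch of the sensor/metric registration flow described above, assuming the kafka-python metrics package these methods appear to come from (`kafka.metrics`, `kafka.metrics.stats`).

```python
# Sketch only: create a registry, attach stats to a sensor, record values,
# then read every registered metric (including the built-in kafka-metrics-count).
from kafka.metrics import Metrics
from kafka.metrics.stats import Avg, Max

metrics = Metrics()
sensor = metrics.sensor('request-latency')
sensor.add(metrics.metric_name('latency-avg', 'my-group'), Avg())
sensor.add(metrics.metric_name('latency-max', 'my-group'), Max())

sensor.record(42.0)
sensor.record(7.5)

for kafka_metric in metrics.metrics.values():
    print(kafka_metric.metric_name.name, kafka_metric.value())
```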
Fetch the current committed offsets for specified partitions
Arguments:
partitions (list of TopicPartition): partitions to fetch
Returns:
dict: {TopicPartition: OffsetAndMetadata} | def fetch_committed_offsets(self, partitions):
if not partitions:
return {}
while True:
self.ensure_coordinator_ready()
# contact coordinator to fetch committed offsets
future = self._send_offset_fetch_request(partitions)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000) | 116,207 |
Commit specific offsets asynchronously.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
callback (callable, optional): called as callback(offsets, response)
response will be either an Exception or a OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future | def commit_offsets_async(self, offsets, callback=None):
self._invoke_completed_offset_commit_callbacks()
if not self.coordinator_unknown():
future = self._do_commit_offsets_async(offsets, callback)
else:
# we don't know the current coordinator, so try to find it and then
# send the commit or fail (we don't want recursive retries which can
# cause offset commits to arrive out of order). Note that there may
# be multiple offset commits chained to the same coordinator lookup
# request. This is fine because the listeners will be invoked in the
# same order that they were added. Note also that BaseCoordinator
# prevents multiple concurrent coordinator lookup requests.
future = self.lookup_coordinator()
future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)())
if callback:
future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))
# ensure the commit has a chance to be transmitted (without blocking on
# its completion). Note that commits are treated as heartbeats by the
# coordinator, so there is no need to explicitly allow heartbeats
# through delayed task execution.
self._client.poll(timeout_ms=0) # no wakeup if we add that feature
return future | 116,210 |
Commit specific offsets synchronously.
This method will retry until the commit completes successfully or an
unrecoverable error is encountered.
Arguments:
offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
Raises error on failure | def commit_offsets_sync(self, offsets):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
self._invoke_completed_offset_commit_callbacks()
if not offsets:
return
while True:
self.ensure_coordinator_ready()
future = self._send_offset_commit_request(offsets)
self._client.poll(future=future)
if future.succeeded():
return future.value
if not future.retriable():
raise future.exception # pylint: disable-msg=raising-bad-type
time.sleep(self.config['retry_backoff_ms'] / 1000) | 116,212 |
Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not | def _send_offset_commit_request(self, offsets):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
offsets.values()))
if not offsets:
log.debug('No offsets to commit')
return Future().success(None)
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# create the offset commit request
offset_data = collections.defaultdict(dict)
for tp, offset in six.iteritems(offsets):
offset_data[tp.topic][tp.partition] = offset
if self._subscription.partitions_auto_assigned():
generation = self.generation()
else:
generation = Generation.NO_GENERATION
# if the generation is None, we are not part of an active group
# (and we expect to be). The only thing we can do is fail the commit
# and let the user rejoin the group in poll()
if self.config['api_version'] >= (0, 9) and generation is None:
return Future().failure(Errors.CommitFailedError())
if self.config['api_version'] >= (0, 9):
request = OffsetCommitRequest[2](
self.group_id,
generation.generation_id,
generation.member_id,
OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 2):
request = OffsetCommitRequest[1](
self.group_id, -1, '',
[(
topic, [(
partition,
offset.offset,
-1,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
elif self.config['api_version'] >= (0, 8, 1):
request = OffsetCommitRequest[0](
self.group_id,
[(
topic, [(
partition,
offset.offset,
offset.metadata
) for partition, offset in six.iteritems(partitions)]
) for topic, partitions in six.iteritems(offset_data)]
)
log.debug("Sending offset-commit request with %s for group %s to %s",
offsets, self.group_id, node_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
_f.add_errback(self._failed_request, node_id, request, future)
return future | 116,214 |
Fetch the committed offsets for a set of partitions.
This is a non-blocking call. The returned future can be polled to get
the actual offsets returned from the broker.
Arguments:
partitions (list of TopicPartition): the partitions to fetch
Returns:
Future: resolves to dict of offsets: {TopicPartition: int} | def _send_offset_fetch_request(self, partitions):
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
if not partitions:
return Future().success({})
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError)
# Verify node is ready
if not self._client.ready(node_id):
log.debug("Node %s not ready -- failing offset fetch request",
node_id)
return Future().failure(Errors.NodeNotReadyError)
log.debug("Group %s fetching committed offsets for partitions: %s",
self.group_id, partitions)
# construct the request
topic_partitions = collections.defaultdict(set)
for tp in partitions:
topic_partitions[tp.topic].add(tp.partition)
if self.config['api_version'] >= (0, 8, 2):
request = OffsetFetchRequest[1](
self.group_id,
list(topic_partitions.items())
)
else:
request = OffsetFetchRequest[0](
self.group_id,
list(topic_partitions.items())
)
# send the request with a callback
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_fetch_response, future)
_f.add_errback(self._failed_request, node_id, request, future)
return future | 116,216 |
Change the topic subscription.
Arguments:
topics (list of str): topics for subscription
Raises:
IllegalStateError: if assign_from_user has been used already
TypeError: if a topic is None or a non-str
ValueError: if a topic is an empty string or
- a topic name is '.' or '..' or
- a topic name does not consist of ASCII-characters/'-'/'_'/'.' | def change_subscription(self, topics):
if self._user_assignment:
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
if isinstance(topics, six.string_types):
topics = [topics]
if self.subscription == set(topics):
log.warning("subscription unchanged by change_subscription(%s)",
topics)
return
for t in topics:
self._ensure_valid_topic_name(t)
log.info('Updating subscribed topics to: %s', topics)
self.subscription = set(topics)
self._group_subscription.update(topics)
# Remove any assigned partitions which are no longer subscribed to
for tp in set(self.assignment.keys()):
if tp.topic not in self.subscription:
del self.assignment[tp] | 116,225 |
Add topics to the current group subscription.
This is used by the group leader to ensure that it receives metadata
updates for all topics that any member of the group is subscribed to.
Arguments:
topics (list of str): topics to add to the group subscription | def group_subscribe(self, topics):
if self._user_assignment:
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
self._group_subscription.update(topics) | 116,226 |
Update the assignment to the specified partitions
This method is called by the coordinator to dynamically assign
partitions based on the consumer's topic subscription. This is different
from assign_from_user() which directly sets the assignment from a
user-supplied TopicPartition list.
Arguments:
assignments (list of TopicPartition): partitions to assign to this
consumer instance. | def assign_from_subscribed(self, assignments):
if not self.partitions_auto_assigned():
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
for tp in assignments:
if tp.topic not in self.subscription:
raise ValueError("Assigned partition %s for non-subscribed topic." % (tp,))
# after rebalancing, we always reinitialize the assignment state
self.assignment.clear()
for tp in assignments:
self._add_assigned_partition(tp)
self.needs_fetch_committed_offsets = True
log.info("Updated partition assignment: %s", assignments) | 116,229 |
Mark partition for offset reset using specified or default strategy.
Arguments:
partition (TopicPartition): partition to mark
offset_reset_strategy (OffsetResetStrategy, optional) | def need_offset_reset(self, partition, offset_reset_strategy=None):
if offset_reset_strategy is None:
offset_reset_strategy = self._default_offset_reset_strategy
self.assignment[partition].await_reset(offset_reset_strategy) | 116,234 |
Close this producer.
Arguments:
timeout (float, optional): timeout in seconds to wait for completion. | def close(self, timeout=None):
# drop our atexit handler now to avoid leaks
self._unregister_cleanup()
if not hasattr(self, '_closed') or self._closed:
log.info('Kafka producer closed')
return
if timeout is None:
# threading.TIMEOUT_MAX is available in Python3.3+
timeout = getattr(threading, 'TIMEOUT_MAX', float('inf'))
if getattr(threading, 'TIMEOUT_MAX', False):
assert 0 <= timeout <= getattr(threading, 'TIMEOUT_MAX')
else:
assert timeout >= 0
log.info("Closing the Kafka producer with %s secs timeout.", timeout)
#first_exception = AtomicReference() # this will keep track of the first encountered exception
invoked_from_callback = bool(threading.current_thread() is self._sender)
if timeout > 0:
if invoked_from_callback:
log.warning("Overriding close timeout %s secs to 0 in order to"
" prevent useless blocking due to self-join. This"
" means you have incorrectly invoked close with a"
" non-zero timeout from the producer call-back.",
timeout)
else:
# Try to close gracefully.
if self._sender is not None:
self._sender.initiate_close()
self._sender.join(timeout)
if self._sender is not None and self._sender.is_alive():
log.info("Proceeding to force close the producer since pending"
" requests could not be completed within timeout %s.",
timeout)
self._sender.force_close()
# Only join the sender thread when not calling from callback.
if not invoked_from_callback:
self._sender.join()
self._metrics.close()
try:
self.config['key_serializer'].close()
except AttributeError:
pass
try:
self.config['value_serializer'].close()
except AttributeError:
pass
self._closed = True
log.debug("The Kafka producer has closed.") | 116,246 |
Wait for cluster metadata including partitions for the given topic to
be available.
Arguments:
topic (str): topic we want metadata for
max_wait (float): maximum time in secs for waiting on the metadata
Returns:
set: partition ids for the topic
Raises:
KafkaTimeoutError: if partitions for topic were not obtained before
specified max_wait timeout | def _wait_on_metadata(self, topic, max_wait):
# add topic to metadata topic list if it is not there already.
self._sender.add_topic(topic)
begin = time.time()
elapsed = 0.0
metadata_event = None
while True:
partitions = self._metadata.partitions_for_topic(topic)
if partitions is not None:
return partitions
if not metadata_event:
metadata_event = threading.Event()
log.debug("Requesting metadata update for topic %s", topic)
metadata_event.clear()
future = self._metadata.request_update()
future.add_both(lambda e, *args: e.set(), metadata_event)
self._sender.wakeup()
metadata_event.wait(max_wait - elapsed)
elapsed = time.time() - begin
if not metadata_event.is_set():
raise Errors.KafkaTimeoutError(
"Failed to update metadata after %.1f secs." % (max_wait,))
elif topic in self._metadata.unauthorized_topics:
raise Errors.TopicAuthorizationFailedError(topic)
else:
log.debug("_wait_on_metadata woke after %s secs.", elapsed) | 116,252 |
Update offsets using auto_offset_reset policy (smallest|largest)
Arguments:
partition (int): the partition for which offsets should be updated
Returns: Updated offset on success, None on failure | def reset_partition_offset(self, partition):
LATEST = -1
EARLIEST = -2
if self.auto_offset_reset == 'largest':
reqs = [OffsetRequestPayload(self.topic, partition, LATEST, 1)]
elif self.auto_offset_reset == 'smallest':
reqs = [OffsetRequestPayload(self.topic, partition, EARLIEST, 1)]
else:
            # Let's raise a reasonable exception type if the user calls
# outside of an exception context
if sys.exc_info() == (None, None, None):
raise OffsetOutOfRangeError('Cannot reset partition offsets without a '
'valid auto_offset_reset setting '
'(largest|smallest)')
# Otherwise we should re-raise the upstream exception
# b/c it typically includes additional data about
# the request that triggered it, and we do not want to drop that
raise # pylint: disable=E0704
# send_offset_request
log.info('Resetting topic-partition offset to %s for %s:%d',
self.auto_offset_reset, self.topic, partition)
try:
(resp, ) = self.client.send_offset_request(reqs)
except KafkaError as e:
log.error('%s sending offset request for %s:%d',
e.__class__.__name__, self.topic, partition)
else:
self.offsets[partition] = resp.offsets[0]
self.fetch_offsets[partition] = resp.offsets[0]
return resp.offsets[0] | 116,261 |
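In the legacy offset API used above, 'largest' and 'smallest' map to the broker's sentinel offsets -1 (latest) and -2 (earliest). A tiny sketch of that mapping (reset_sentinel is a hypothetical helper for illustration):

# Sentinel offsets used by the legacy OffsetRequest API
LATEST = -1    # 'largest'  -> next offset to be written
EARLIEST = -2  # 'smallest' -> oldest retained offset

def reset_sentinel(auto_offset_reset):
    try:
        return {'largest': LATEST, 'smallest': EARLIEST}[auto_offset_reset]
    except KeyError:
        raise ValueError('unsupported auto_offset_reset: %r' % auto_offset_reset)

assert reset_sentinel('largest') == -1
assert reset_sentinel('smallest') == -2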
Encode an integer to a varint representation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
bytearray: Encoded representation of the integer with length from 1 to 10
bytes | def encode_varint_1(num):
    # Zigzag-encode: move the sign into the least significant bit
num = (num << 1) ^ (num >> 63)
# Max 10 bytes. We assert those are allocated
buf = bytearray(10)
for i in range(10):
# 7 lowest bits from the number and set 8th if we still have pending
# bits left to encode
buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
num = num >> 7
if num == 0:
break
else:
        # Max size of an encoded 64-bit value is 10 bytes
raise ValueError("Out of double range")
return buf[:i + 1] | 116,269 |
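A few concrete values help sanity-check the zigzag plus 7-bit chunking above. Assuming encode_varint_1 is in scope, these are the expected encodings (the zigzag step maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...):

assert encode_varint_1(0)    == bytearray(b'\x00')
assert encode_varint_1(-1)   == bytearray(b'\x01')
assert encode_varint_1(1)    == bytearray(b'\x02')
assert encode_varint_1(300)  == bytearray(b'\xd8\x04')  # zigzag(300) = 600
assert encode_varint_1(-150) == bytearray(b'\xab\x02')  # zigzag(-150) = 299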
Decode an integer from a varint representation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
buffer (bytes-like): any object acceptable by ``memoryview``
pos (int): optional position to read from
Returns:
(int, int): Decoded int value and next read position | def decode_varint_1(buffer, pos=0):
value = 0
shift = 0
memview = memoryview(buffer)
for i in range(pos, pos + 10):
try:
byte = _read_byte(memview, i)
except IndexError:
raise ValueError("End of byte stream")
if byte & 0x80 != 0:
value |= (byte & 0x7f) << shift
shift += 7
else:
value |= byte << shift
break
else:
        # Max size of an encoded 64-bit value is 10 bytes
raise ValueError("Out of double range")
# Normalize sign
return (value >> 1) ^ -(value & 1), i + 1 | 116,277 |
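decode_varint_1 relies on a _read_byte helper that is not shown here. A self-contained round-trip sketch of the same zigzag varint scheme, useful for checking both directions (zigzag_encode and zigzag_decode are standalone re-implementations, not the library's functions):

def zigzag_encode(num):
    # Same scheme as encode_varint_1 above, written standalone
    num = (num << 1) ^ (num >> 63)
    out = bytearray()
    while True:
        byte = num & 0x7f
        num >>= 7
        out.append(byte | (0x80 if num else 0))
        if not num:
            return bytes(out)

def zigzag_decode(buf, pos=0):
    value = shift = 0
    while True:
        byte = buf[pos]
        pos += 1
        value |= (byte & 0x7f) << shift
        shift += 7
        if not byte & 0x80:
            break
    return (value >> 1) ^ -(value & 1), pos

for n in (0, 1, -1, 300, -150, 2**40, -(2**40)):
    encoded = zigzag_encode(n)
    decoded, next_pos = zigzag_decode(encoded)
    assert decoded == n and next_pos == len(encoded)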
Update CRC-32C checksum with data.
Args:
crc: 32-bit checksum to update as long.
data: byte array, string or iterable over bytes.
Returns:
32-bit updated CRC-32C as long. | def crc_update(crc, data):
if type(data) != array.array or data.itemsize != 1:
buf = array.array("B", data)
else:
buf = data
crc = crc ^ _MASK
for b in buf:
table_index = (crc ^ b) & 0xff
crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
return crc ^ _MASK | 116,309 |
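crc_update depends on a precomputed CRC_TABLE and _MASK (0xFFFFFFFF) defined elsewhere. A self-contained bitwise CRC-32C (Castagnoli, reflected polynomial 0x82F63B78) shows the same checksum without the lookup table and can be verified against the standard test vector:

def crc32c(data, crc=0):
    # Bitwise CRC-32C; the table-driven version above is the byte-at-a-time
    # equivalent using a 256-entry lookup table.
    crc ^= 0xFFFFFFFF
    for byte in bytearray(data):
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0x82F63B78 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF

# Well-known CRC-32C check value
assert crc32c(b'123456789') == 0xE3069283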
Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired | def abort_expired_batches(self, request_timeout_ms, cluster):
expired_batches = []
to_remove = []
count = 0
for tp in list(self._batches.keys()):
assert tp in self._tp_locks, 'TopicPartition not in locks dict'
# We only check if the batch should be expired if the partition
# does not have a batch in flight. This is to avoid the later
# batches get expired when an earlier batch is still in progress.
            # This protection only takes effect when the user sets
            # max.in.flight.requests.per.connection=1. Otherwise the expiration
            # order is not guaranteed.
if tp in self.muted:
continue
with self._tp_locks[tp]:
# iterate over the batches and expire them if they have stayed
# in accumulator for more than request_timeout_ms
dq = self._batches[tp]
for batch in dq:
is_full = bool(bool(batch != dq[-1]) or batch.records.is_full())
# check if the batch is expired
if batch.maybe_expire(request_timeout_ms,
self.config['retry_backoff_ms'],
self.config['linger_ms'],
is_full):
expired_batches.append(batch)
to_remove.append(batch)
count += 1
self.deallocate(batch)
else:
# Stop at the first batch that has not expired.
break
# Python does not allow us to mutate the dq during iteration
# Assuming expired batches are infrequent, this is better than
# creating a new copy of the deque for iteration on every loop
if to_remove:
for batch in to_remove:
dq.remove(batch)
to_remove = []
if expired_batches:
log.warning("Expired %d batches in accumulator", count) # trace
return expired_batches | 116,318 |
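The comments above note that the deque cannot be mutated while it is being iterated, so expired entries are collected first and removed afterwards. A standalone sketch of that collect-then-remove pattern on toy data:

import collections
import time

dq = collections.deque([
    ('batch-1', time.time() - 60),   # created 60s ago
    ('batch-2', time.time() - 5),    # created 5s ago
    ('batch-3', time.time()),        # just created
])

timeout_secs = 30
to_remove = []
for entry in dq:
    _name, created = entry
    if time.time() - created > timeout_secs:
        to_remove.append(entry)
    else:
        break  # stop at the first entry that has not expired

# Mutate the deque only after iteration has finished
for entry in to_remove:
    dq.remove(entry)

print([name for name, _ in dq])  # ['batch-2', 'batch-3']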
Close socket and fail all in-flight-requests.
Arguments:
error (Exception, optional): pending in-flight-requests
will be failed with this exception.
Default: kafka.errors.KafkaConnectionError. | def close(self, error=None):
if self.state is ConnectionStates.DISCONNECTED:
return
with self._lock:
if self.state is ConnectionStates.DISCONNECTED:
return
log.info('%s: Closing connection. %s', self, error or '')
self._update_reconnect_backoff()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
if error is None:
error = Errors.Cancelled(str(self))
ifrs = list(self.in_flight_requests.items())
self.in_flight_requests.clear()
self.state = ConnectionStates.DISCONNECTED
# To avoid race conditions and/or deadlocks
# keep a reference to the socket but leave it
# open until after the state_change_callback
            # This should give clients a chance to deregister
# the socket fd from selectors cleanly.
sock = self._sock
self._sock = None
# drop lock before state change callback and processing futures
self.config['state_change_callback'](self.node_id, sock, self)
sock.close()
for (_correlation_id, (future, _timestamp)) in ifrs:
future.failure(error) | 116,394 |
Complete or retry the given batch of records.
Arguments:
batch (RecordBatch): The record batch
error (Exception): The error (or None if none)
base_offset (int): The base offset assigned to the records if successful
timestamp_ms (int, optional): The timestamp returned by the broker for this batch | def _complete_batch(self, batch, error, base_offset, timestamp_ms=None):
# Standardize no-error to None
if error is Errors.NoError:
error = None
if error is not None and self._can_retry(batch, error):
# retry
log.warning("Got error produce response on topic-partition %s,"
" retrying (%d attempts left). Error: %s",
batch.topic_partition,
self.config['retries'] - batch.attempts - 1,
error)
self._accumulator.reenqueue(batch)
self._sensors.record_retries(batch.topic_partition.topic, batch.record_count)
else:
if error is Errors.TopicAuthorizationFailedError:
error = error(batch.topic_partition.topic)
# tell the user the result of their request
batch.done(base_offset, timestamp_ms, error)
self._accumulator.deallocate(batch)
if error is not None:
self._sensors.record_errors(batch.topic_partition.topic, batch.record_count)
if getattr(error, 'invalid_metadata', False):
self._metadata.request_update()
# Unmute the completed partition.
if self.config['guarantee_message_order']:
self._accumulator.muted.remove(batch.topic_partition) | 116,421 |
Transfer the record batches into a list of produce requests on a
per-node basis.
Arguments:
collated: {node_id: [RecordBatch]}
Returns:
dict: {node_id: ProduceRequest} (version depends on api_version) | def _create_produce_requests(self, collated):
requests = {}
for node_id, batches in six.iteritems(collated):
requests[node_id] = self._produce_request(
node_id, self.config['acks'],
self.config['request_timeout_ms'], batches)
return requests | 116,423 |
Get BrokerMetadata
Arguments:
broker_id (int): node_id for a broker to check
Returns:
BrokerMetadata or None if not found | def broker_metadata(self, broker_id):
return self._brokers.get(broker_id) or self._bootstrap_brokers.get(broker_id) | 116,439 |
Return set of all partitions for topic (whether available or not)
Arguments:
topic (str): topic to check for partitions
Returns:
set: {partition (int), ...} | def partitions_for_topic(self, topic):
if topic not in self._partitions:
return None
return set(self._partitions[topic].keys()) | 116,440 |
Return set of partitions with known leaders
Arguments:
topic (str): topic to check for partitions
Returns:
set: {partition (int), ...}
None if topic not found. | def available_partitions_for_topic(self, topic):
if topic not in self._partitions:
return None
return set([partition for partition, metadata
in six.iteritems(self._partitions[topic])
if metadata.leader != -1]) | 116,441 |
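A toy sketch of the leader filter above, using a plain dict of partition id to a metadata-like object where leader == -1 means no known leader (PartitionMeta is a simplified stand-in for illustration):

from collections import namedtuple

# Simplified stand-in for PartitionMetadata (illustration only)
PartitionMeta = namedtuple('PartitionMeta', ['leader'])

partitions = {0: PartitionMeta(leader=1),
              1: PartitionMeta(leader=-1),   # leader currently unknown
              2: PartitionMeta(leader=3)}

available = {p for p, meta in partitions.items() if meta.leader != -1}
print(available)  # {0, 2}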
Get set of known topics.
Arguments:
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to
True the only way to receive records from an internal topic is
subscribing to it. Default True
Returns:
set: {topic (str), ...} | def topics(self, exclude_internal_topics=True):
topics = set(self._partitions.keys())
if exclude_internal_topics:
return topics - self.internal_topics
else:
return topics | 116,445 |
Update cluster state given a MetadataResponse.
Arguments:
metadata (MetadataResponse): broker response to a metadata request
Returns: None | def update_metadata(self, metadata):
# In the common case where we ask for a single topic and get back an
# error, we should fail the future
if len(metadata.topics) == 1 and metadata.topics[0][0] != 0:
error_code, topic = metadata.topics[0][:2]
error = Errors.for_code(error_code)(topic)
return self.failed_update(error)
if not metadata.brokers:
log.warning("No broker metadata found in MetadataResponse -- ignoring.")
return self.failed_update(Errors.MetadataEmptyBrokerList(metadata))
_new_brokers = {}
for broker in metadata.brokers:
if metadata.API_VERSION == 0:
node_id, host, port = broker
rack = None
else:
node_id, host, port, rack = broker
_new_brokers.update({
node_id: BrokerMetadata(node_id, host, port, rack)
})
if metadata.API_VERSION == 0:
_new_controller = None
else:
_new_controller = _new_brokers.get(metadata.controller_id)
_new_partitions = {}
_new_broker_partitions = collections.defaultdict(set)
_new_unauthorized_topics = set()
_new_internal_topics = set()
for topic_data in metadata.topics:
if metadata.API_VERSION == 0:
error_code, topic, partitions = topic_data
is_internal = False
else:
error_code, topic, is_internal, partitions = topic_data
if is_internal:
_new_internal_topics.add(topic)
error_type = Errors.for_code(error_code)
if error_type is Errors.NoError:
_new_partitions[topic] = {}
for p_error, partition, leader, replicas, isr in partitions:
_new_partitions[topic][partition] = PartitionMetadata(
topic=topic, partition=partition, leader=leader,
replicas=replicas, isr=isr, error=p_error)
if leader != -1:
_new_broker_partitions[leader].add(
TopicPartition(topic, partition))
elif error_type is Errors.LeaderNotAvailableError:
log.warning("Topic %s is not available during auto-create"
" initialization", topic)
elif error_type is Errors.UnknownTopicOrPartitionError:
log.error("Topic %s not found in cluster metadata", topic)
elif error_type is Errors.TopicAuthorizationFailedError:
log.error("Topic %s is not authorized for this client", topic)
_new_unauthorized_topics.add(topic)
elif error_type is Errors.InvalidTopicError:
log.error("'%s' is not a valid topic name", topic)
else:
log.error("Error fetching metadata for topic %s: %s",
topic, error_type)
with self._lock:
self._brokers = _new_brokers
self.controller = _new_controller
self._partitions = _new_partitions
self._broker_partitions = _new_broker_partitions
self.unauthorized_topics = _new_unauthorized_topics
self.internal_topics = _new_internal_topics
f = None
if self._future:
f = self._future
self._future = None
self._need_update = False
now = time.time() * 1000
self._last_refresh_ms = now
self._last_successful_refresh_ms = now
if f:
f.success(self)
log.debug("Updated cluster metadata to %s", self)
for listener in self._listeners:
listener(self)
if self.need_all_topic_metadata:
# the listener may change the interested topics,
# which could cause another metadata refresh.
# If we have already fetched all topics, however,
# another fetch should be unnecessary.
self._need_update = False | 116,447 |
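A small sketch of the broker-tuple handling at the top of update_metadata, using toy tuples in the two wire shapes the code distinguishes (MetadataResponse v0 carries no rack field). The parse_brokers helper and the local BrokerMetadata namedtuple are stand-ins for illustration:

from collections import namedtuple

BrokerMetadata = namedtuple('BrokerMetadata', ['nodeId', 'host', 'port', 'rack'])

def parse_brokers(brokers, api_version):
    parsed = {}
    for broker in brokers:
        if api_version == 0:
            node_id, host, port = broker
            rack = None
        else:
            node_id, host, port, rack = broker
        parsed[node_id] = BrokerMetadata(node_id, host, port, rack)
    return parsed

v0 = [(0, 'broker-a', 9092), (1, 'broker-b', 9092)]
v1 = [(0, 'broker-a', 9092, 'rack-1'), (1, 'broker-b', 9092, 'rack-2')]
print(parse_brokers(v0, 0))
print(parse_brokers(v1, 1))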
Update with metadata for a group coordinator
Arguments:
group (str): name of group from GroupCoordinatorRequest
response (GroupCoordinatorResponse): broker response
Returns:
bool: True if metadata is updated, False on error | def add_group_coordinator(self, group, response):
log.debug("Updating coordinator for %s: %s", group, response)
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
log.error("GroupCoordinatorResponse error: %s", error_type)
self._groups[group] = -1
return False
node_id = response.coordinator_id
coordinator = BrokerMetadata(
response.coordinator_id,
response.host,
response.port,
None)
# Assume that group coordinators are just brokers
# (this is true now, but could diverge in future)
if node_id not in self._brokers:
self._brokers[node_id] = coordinator
# If this happens, either brokers have moved without
# changing IDs, or our assumption above is wrong
else:
node = self._brokers[node_id]
if coordinator.host != node.host or coordinator.port != node.port:
log.error("GroupCoordinator metadata conflicts with existing"
" broker metadata. Coordinator: %s, Broker: %s",
coordinator, node)
self._groups[group] = node_id
return False
log.info("Group coordinator for %s is %s", group, coordinator)
self._groups[group] = node_id
return True | 116,448 |
Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block.
Returns:
dict: Map of topic to list of records (may be empty). | def _poll_once(self, timeout_ms, max_records):
self._coordinator.poll()
# Fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# If data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
records, partial = self._fetcher.fetched_records(max_records)
if records:
# Before returning the fetched records, we can send off the
# next round of fetches and avoid block waiting for their
# responses to enable pipelining while the user is handling the
# fetched records.
if not partial:
self._fetcher.send_fetches()
return records
# Send any new fetches (won't resend pending fetches)
self._fetcher.send_fetches()
timeout_ms = min(timeout_ms, self._coordinator.time_to_next_poll() * 1000)
self._client.poll(timeout_ms=timeout_ms)
# after the long poll, we should check whether the group needs to rebalance
# prior to returning data so that the group can stabilize faster
if self._coordinator.need_rejoin():
return {}
records, _ = self._fetcher.fetched_records(max_records)
return records | 116,459 |
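A possible usage sketch of the public poll loop that drives _poll_once; the broker address, topic and group id are placeholders, not from the source:

from kafka import KafkaConsumer

# Assumes a broker at localhost:9092 and an existing 'events' topic
consumer = KafkaConsumer('events',
                         bootstrap_servers='localhost:9092',
                         group_id='example-group',
                         auto_offset_reset='earliest',
                         enable_auto_commit=True)
try:
    while True:
        # poll() returns {TopicPartition: [ConsumerRecord, ...]}
        records = consumer.poll(timeout_ms=1000, max_records=500)
        for tp, messages in records.items():
            for msg in messages:
                print(tp.topic, tp.partition, msg.offset, msg.value)
finally:
    consumer.close()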
Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset | def position(self, partition):
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
offset = self._subscription.assignment[partition].position
if offset is None:
self._update_fetch_positions([partition])
offset = self._subscription.assignment[partition].position
return offset | 116,460 |
Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause. | def pause(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Pausing partition %s", partition)
self._subscription.pause(partition) | 116,462 |
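A possible usage sketch of pause (and its counterpart resume) against a manually assigned consumer; connection details and topic name are placeholders:

from kafka import KafkaConsumer, TopicPartition

# Assumes a broker at localhost:9092 and an 'events' topic with partition 0
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
tp = TopicPartition('events', 0)
consumer.assign([tp])

consumer.pause(tp)        # poll() will no longer return records for tp
print(consumer.paused())  # partitions currently paused
consumer.resume(tp)       # fetching resumes from the paused position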
Seek to the oldest available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned. | def seek_to_beginning(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to beginning of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST) | 116,464 |
Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned. | def seek_to_end(self, *partitions):
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST) | 116,465 |
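A possible usage sketch covering both seek_to_beginning and seek_to_end on a manually assigned partition; as the code above shows, the seek only marks the partition for reset, and the actual offset is resolved on the next fetch or position() call. Broker address and topic name are placeholders:

from kafka import KafkaConsumer, TopicPartition

# Assumes a broker at localhost:9092 and an 'events' topic with partition 0
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
tp = TopicPartition('events', 0)
consumer.assign([tp])

consumer.seek_to_beginning(tp)            # replay from the oldest retained offset
oldest_batch = consumer.poll(timeout_ms=1000)

consumer.seek_to_end(tp)                  # skip ahead to only new records
print(consumer.position(tp))              # resolves the pending reset to an offset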
Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
Arguments:
partitions (List[TopicPartition]): The partitions that need
updating fetch positions.
Raises:
NoOffsetForPartitionError: If no offset is stored for a given
partition and no offset reset policy is defined. | def _update_fetch_positions(self, partitions):
# Lookup any positions for partitions which are awaiting reset (which may be the
        # case if the user called :meth:`seek_to_beginning` or :meth:`seek_to_end`). We do
# this check first to avoid an unnecessary lookup of committed offsets (which
# typically occurs when the user is manually assigning partitions and managing
# their own offsets).
self._fetcher.reset_offsets_if_needed(partitions)
if not self._subscription.has_all_fetch_positions():
# if we still don't have offsets for all partitions, then we should either seek
# to the last committed position or reset using the auto reset policy
if (self.config['api_version'] >= (0, 8, 1) and
self.config['group_id'] is not None):
# first refresh commits for all assigned partitions
self._coordinator.refresh_committed_offsets_if_needed()
# Then, do any offset lookups in case some positions are not known
self._fetcher.update_fetch_positions(partitions) | 116,472 |
Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node | def ready(self, node_id, metadata_priority=True):
self.maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority) | 116,489 |
Close one or all broker connections.
Arguments:
node_id (int, optional): the id of the node to close | def close(self, node_id=None):
with self._lock:
if node_id is None:
self._close()
conns = list(self._conns.values())
self._conns.clear()
for conn in conns:
conn.close()
elif node_id in self._conns:
self._conns.pop(node_id).close()
else:
log.warning("Node %s not found in current connection list; skipping", node_id)
return | 116,492 |
Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
call, but there are cases where transient failures need to be caught
and re-acted upon.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True iff the node exists and is disconnected | def is_disconnected(self, node_id):
conn = self._conns.get(node_id)
if conn is None:
return False
return conn.disconnected() | 116,493 |