Dataset schema (column name, dtype, observed value range):

  id                int32     0 to 252k
  repo              string    7 to 55 chars
  path              string    4 to 127 chars
  func_name         string    1 to 88 chars
  original_string   string    75 to 19.8k chars
  language          string    1 distinct value
  code              string    51 to 19.8k chars
  code_tokens       sequence
  docstring         string    3 to 17.3k chars
  docstring_tokens  sequence
  sha               string    40 chars
  url               string    87 to 242 chars
900
eandersson/amqpstorm
amqpstorm/uri_connection.py
UriConnection._parse_uri_options
def _parse_uri_options(self, parsed_uri, use_ssl=False, ssl_options=None):
    """Parse the uri options.

    :param parsed_uri:
    :param bool use_ssl:

    :return:
    """
    ssl_options = ssl_options or {}
    kwargs = urlparse.parse_qs(parsed_uri.query)
    vhost = urlparse.unquote(parsed_uri.path[1:]) or DEFAULT_VIRTUAL_HOST
    options = {
        'ssl': use_ssl,
        'virtual_host': vhost,
        'heartbeat': int(kwargs.pop('heartbeat',
                                    [DEFAULT_HEARTBEAT_INTERVAL])[0]),
        'timeout': int(kwargs.pop('timeout', [DEFAULT_SOCKET_TIMEOUT])[0])
    }
    if use_ssl:
        if not compatibility.SSL_SUPPORTED:
            raise AMQPConnectionError(
                'Python not compiled with support '
                'for TLSv1 or higher'
            )
        ssl_options.update(self._parse_ssl_options(kwargs))
        options['ssl_options'] = ssl_options
    return options
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/uri_connection.py#L51-L77
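A minimal usage sketch for the options parsing above: connection settings such as heartbeat and timeout travel as query-string parameters on the AMQP URI, and '%2F' is the url-encoded default vhost '/'. The broker address and credentials here are placeholders.

from amqpstorm import UriConnection

# heartbeat and timeout are pulled out of the query string by
# _parse_uri_options when the connection is created.
connection = UriConnection(
    'amqp://guest:guest@localhost:5672/%2F?heartbeat=60&timeout=10'
)
channel = connection.channel()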
901
eandersson/amqpstorm
amqpstorm/uri_connection.py
UriConnection._parse_ssl_options
def _parse_ssl_options(self, ssl_kwargs):
    """Parse TLS Options.

    :param ssl_kwargs:
    :rtype: dict
    """
    ssl_options = {}
    for key in ssl_kwargs:
        if key not in compatibility.SSL_OPTIONS:
            LOGGER.warning('invalid option: %s', key)
            continue
        if 'ssl_version' in key:
            value = self._get_ssl_version(ssl_kwargs[key][0])
        elif 'cert_reqs' in key:
            value = self._get_ssl_validation(ssl_kwargs[key][0])
        else:
            value = ssl_kwargs[key][0]
        ssl_options[key] = value
    return ssl_options
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/uri_connection.py#L79-L97
902
eandersson/amqpstorm
amqpstorm/uri_connection.py
UriConnection._get_ssl_version
def _get_ssl_version(self, value):
    """Get the TLS Version.

    :param str value:
    :return: TLS Version
    """
    return self._get_ssl_attribute(value, compatibility.SSL_VERSIONS,
                                   ssl.PROTOCOL_TLSv1,
                                   'ssl_options: ssl_version \'%s\' not '
                                   'found falling back to PROTOCOL_TLSv1.')
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/uri_connection.py#L99-L108
903
eandersson/amqpstorm
amqpstorm/uri_connection.py
UriConnection._get_ssl_validation
def _get_ssl_validation(self, value):
    """Get the TLS Validation option.

    :param str value:
    :return: TLS Certificate Options
    """
    return self._get_ssl_attribute(value, compatibility.SSL_CERT_MAP,
                                   ssl.CERT_NONE,
                                   'ssl_options: cert_reqs \'%s\' not '
                                   'found falling back to CERT_NONE.')
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/uri_connection.py#L110-L119
904
eandersson/amqpstorm
amqpstorm/uri_connection.py
UriConnection._get_ssl_attribute
def _get_ssl_attribute(value, mapping, default_value, warning_message):
    """Get the TLS attribute based on the compatibility mapping.

    If no valid attribute can be found, fall-back on default and
    display a warning.

    :param str value:
    :param dict mapping: Dictionary based mapping
    :param default_value: Default fall-back value
    :param str warning_message: Warning message
    :return:
    """
    for key in mapping:
        if not key.endswith(value.lower()):
            continue
        return mapping[key]
    LOGGER.warning(warning_message, value)
    return default_value
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/uri_connection.py#L122-L139
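The lookup above matches by key suffix, so user input like 'TLSv1_2' resolves against longer constant names. A standalone sketch of that matching logic, using a hypothetical mapping rather than the library's real SSL_VERSIONS table:

def lookup(value, mapping, default):
    # First key whose name ends with the lower-cased requested value wins.
    for key in mapping:
        if key.endswith(value.lower()):
            return mapping[key]
    return default

versions = {'protocol_tlsv1': 1, 'protocol_tlsv1_2': 2}  # hypothetical
print(lookup('TLSv1_2', versions, None))  # -> 2
print(lookup('sslv3', versions, None))    # -> None (falls back to default)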
905
eandersson/amqpstorm
amqpstorm/management/api.py
ManagementApi.top
def top(self):
    """Top Processes.

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: list
    """
    nodes = []
    for node in self.nodes():
        nodes.append(self.http_client.get(API_TOP % node['name']))
    return nodes
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/api.py#L133-L144
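A short usage sketch for the management call above, assuming RabbitMQ's management plugin is reachable on its default port; the URL and credentials are placeholders:

from amqpstorm.management import ManagementApi

client = ManagementApi('http://localhost:15672', 'guest', 'guest')
# top() returns one entry per cluster node.
for node_stats in client.top():
    print(node_stats)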
906
eandersson/amqpstorm
amqpstorm/basic.py
Basic.qos
def qos(self, prefetch_count=0, prefetch_size=0, global_=False):
    """Specify quality of service.

    :param int prefetch_count: Prefetch window in messages
    :param int/long prefetch_size: Prefetch window in octets
    :param bool global_: Apply to entire connection

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_integer(prefetch_count):
        raise AMQPInvalidArgument('prefetch_count should be an integer')
    elif not compatibility.is_integer(prefetch_size):
        raise AMQPInvalidArgument('prefetch_size should be an integer')
    elif not isinstance(global_, bool):
        raise AMQPInvalidArgument('global_ should be a boolean')
    qos_frame = specification.Basic.Qos(prefetch_count=prefetch_count,
                                        prefetch_size=prefetch_size,
                                        global_=global_)
    return self._channel.rpc_request(qos_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L28-L51
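A typical use of qos is to bound how many unacknowledged messages the broker will push to a consumer at once. A sketch, with placeholder broker details:

import amqpstorm

connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()
# At most 10 unacknowledged messages outstanding on this channel.
channel.basic.qos(prefetch_count=10)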
907
eandersson/amqpstorm
amqpstorm/basic.py
Basic.get
def get(self, queue='', no_ack=False, to_dict=False, auto_decode=True):
    """Fetch a single message.

    :param str queue: Queue name
    :param bool no_ack: No acknowledgement needed
    :param bool to_dict: Should incoming messages be converted to a
                         dictionary before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :returns: Returns a single message, as long as there is a message in
              the queue. If no message is available, returns None.

    :rtype: dict|Message|None
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    elif not isinstance(no_ack, bool):
        raise AMQPInvalidArgument('no_ack should be a boolean')
    elif self._channel.consumer_tags:
        raise AMQPChannelError("Cannot call 'get' when channel is "
                               "set to consume")
    get_frame = specification.Basic.Get(queue=queue, no_ack=no_ack)
    with self._channel.lock and self._channel.rpc.lock:
        message = self._get_message(get_frame, auto_decode=auto_decode)
        if message and to_dict:
            return message.to_dict()
        return message
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L53-L85
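get is the polling counterpart to consuming. A sketch, assuming an open channel like the one created in the qos example and a queue name that exists:

message = channel.basic.get(queue='simple_queue', no_ack=False)
if message:
    print(message.body)
    message.ack()  # required since no_ack=False
else:
    print('queue was empty')  # get() returned None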
908
eandersson/amqpstorm
amqpstorm/basic.py
Basic.recover
def recover(self, requeue=False):
    """Redeliver unacknowledged messages.

    :param bool requeue: Re-queue the messages

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not isinstance(requeue, bool):
        raise AMQPInvalidArgument('requeue should be a boolean')
    recover_frame = specification.Basic.Recover(requeue=requeue)
    return self._channel.rpc_request(recover_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L87-L102
909
eandersson/amqpstorm
amqpstorm/basic.py
Basic.cancel
def cancel(self, consumer_tag=''):
    """Cancel a queue consumer.

    :param str consumer_tag: Consumer tag

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(consumer_tag):
        raise AMQPInvalidArgument('consumer_tag should be a string')
    cancel_frame = specification.Basic.Cancel(consumer_tag=consumer_tag)
    result = self._channel.rpc_request(cancel_frame)
    self._channel.remove_consumer_tag(consumer_tag)
    return result
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L143-L160
910
eandersson/amqpstorm
amqpstorm/basic.py
Basic.reject
def reject(self, delivery_tag=0, requeue=True):
    """Reject Message.

    :param int/long delivery_tag: Server-assigned delivery tag
    :param bool requeue: Re-queue the message

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not compatibility.is_integer(delivery_tag):
        raise AMQPInvalidArgument('delivery_tag should be an integer')
    elif not isinstance(requeue, bool):
        raise AMQPInvalidArgument('requeue should be a boolean')
    reject_frame = specification.Basic.Reject(delivery_tag=delivery_tag,
                                              requeue=requeue)
    self._channel.write_frame(reject_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L247-L266
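When a delivered message cannot be processed, it can be handed back to the broker for redelivery. A sketch, assuming `message` was returned by the channel.basic.get example above:

# Requeue the message; requeue=False would discard or dead-letter it.
channel.basic.reject(delivery_tag=message.delivery_tag, requeue=True)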
911
eandersson/amqpstorm
amqpstorm/basic.py
Basic._consume_add_and_get_tag
def _consume_add_and_get_tag(self, consume_rpc_result):
    """Add the tag to the channel and return it.

    :param dict consume_rpc_result:

    :rtype: str
    """
    consumer_tag = consume_rpc_result['consumer_tag']
    self._channel.add_consumer_tag(consumer_tag)
    return consumer_tag
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L268-L277
912
eandersson/amqpstorm
amqpstorm/basic.py
Basic._consume_rpc_request
def _consume_rpc_request(self, arguments, consumer_tag, exclusive, no_ack,
                         no_local, queue):
    """Create a Consume Frame and execute a RPC request.

    :param str queue: Queue name
    :param str consumer_tag: Consumer tag
    :param bool no_local: Do not deliver own messages
    :param bool no_ack: No acknowledgement needed
    :param bool exclusive: Request exclusive access
    :param dict arguments: Consume key/value arguments

    :rtype: dict
    """
    consume_frame = specification.Basic.Consume(queue=queue,
                                                consumer_tag=consumer_tag,
                                                exclusive=exclusive,
                                                no_local=no_local,
                                                no_ack=no_ack,
                                                arguments=arguments)
    return self._channel.rpc_request(consume_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L279-L298
913
eandersson/amqpstorm
amqpstorm/basic.py
Basic._validate_publish_parameters
def _validate_publish_parameters(body, exchange, immediate, mandatory,
                                 properties, routing_key):
    """Validate Publish Parameters.

    :param bytes|str|unicode body: Message payload
    :param str routing_key: Message routing key
    :param str exchange: The exchange to publish the message to
    :param dict properties: Message properties
    :param bool mandatory: Requires the message is published
    :param bool immediate: Request immediate delivery

    :raises AMQPInvalidArgument: Invalid Parameters

    :return:
    """
    if not compatibility.is_string(body):
        raise AMQPInvalidArgument('body should be a string')
    elif not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    elif not compatibility.is_string(exchange):
        raise AMQPInvalidArgument('exchange should be a string')
    elif properties is not None and not isinstance(properties, dict):
        raise AMQPInvalidArgument('properties should be a dict or None')
    elif not isinstance(mandatory, bool):
        raise AMQPInvalidArgument('mandatory should be a boolean')
    elif not isinstance(immediate, bool):
        raise AMQPInvalidArgument('immediate should be a boolean')
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L301-L327
914
eandersson/amqpstorm
amqpstorm/basic.py
Basic._handle_utf8_payload
def _handle_utf8_payload(body, properties):
    """Update the Body and Properties to the appropriate encoding.

    :param bytes|str|unicode body: Message payload
    :param dict properties: Message properties

    :return:
    """
    if 'content_encoding' not in properties:
        properties['content_encoding'] = 'utf-8'
    encoding = properties['content_encoding']
    if compatibility.is_unicode(body):
        body = body.encode(encoding)
    elif compatibility.PYTHON3 and isinstance(body, str):
        body = bytes(body, encoding=encoding)
    return body
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L330-L345
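The defaulting-and-encoding behaviour above, restated as a standalone Python 3 sketch without the library's compatibility shims:

properties = {}
body = 'héllo'
if 'content_encoding' not in properties:
    properties['content_encoding'] = 'utf-8'  # default applied when unset
if isinstance(body, str):
    body = body.encode(properties['content_encoding'])
print(properties)  # {'content_encoding': 'utf-8'}
print(body)        # b'h\xc3\xa9llo'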
915
eandersson/amqpstorm
amqpstorm/basic.py
Basic._get_message
def _get_message(self, get_frame, auto_decode):
    """Get and return a message using a Basic.Get frame.

    :param Basic.Get get_frame:
    :param bool auto_decode: Auto-decode strings when possible.

    :rtype: Message
    """
    message_uuid = self._channel.rpc.register_request(
        get_frame.valid_responses + ['ContentHeader', 'ContentBody']
    )
    try:
        self._channel.write_frame(get_frame)
        get_ok_frame = self._channel.rpc.get_request(message_uuid,
                                                     raw=True,
                                                     multiple=True)
        if isinstance(get_ok_frame, specification.Basic.GetEmpty):
            return None
        content_header = self._channel.rpc.get_request(message_uuid,
                                                       raw=True,
                                                       multiple=True)
        body = self._get_content_body(message_uuid,
                                      content_header.body_size)
    finally:
        self._channel.rpc.remove(message_uuid)
    return Message(channel=self._channel,
                   body=body,
                   method=dict(get_ok_frame),
                   properties=dict(content_header.properties),
                   auto_decode=auto_decode)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L347-L376
916
eandersson/amqpstorm
amqpstorm/basic.py
Basic._publish_confirm
def _publish_confirm(self, frames_out):
    """Confirm that message was published successfully.

    :param list frames_out:

    :rtype: bool
    """
    confirm_uuid = self._channel.rpc.register_request(['Basic.Ack',
                                                       'Basic.Nack'])
    self._channel.write_frames(frames_out)
    result = self._channel.rpc.get_request(confirm_uuid, raw=True)
    self._channel.check_for_errors()
    if isinstance(result, specification.Basic.Ack):
        return True
    return False
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L378-L392
917
eandersson/amqpstorm
amqpstorm/basic.py
Basic._create_content_body
def _create_content_body(self, body):
    """Split body based on the maximum frame size.

    This function is based on code from Rabbitpy.
    https://github.com/gmr/rabbitpy

    :param bytes|str|unicode body: Message payload

    :rtype: collections.Iterable
    """
    frames = int(math.ceil(len(body) / float(self._max_frame_size)))
    for offset in compatibility.RANGE(0, frames):
        start_frame = self._max_frame_size * offset
        end_frame = start_frame + self._max_frame_size
        body_len = len(body)
        if end_frame > body_len:
            end_frame = body_len
        yield pamqp_body.ContentBody(body[start_frame:end_frame])
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L394-L411
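The frame-splitting arithmetic above, as a standalone sketch without the pamqp ContentBody wrapper:

import math

def split_body(body, max_frame_size):
    # ceil(len/size) slices; the last slice may be short.
    frames = int(math.ceil(len(body) / float(max_frame_size)))
    for offset in range(frames):
        start = max_frame_size * offset
        yield body[start:start + max_frame_size]

print(list(split_body(b'abcdefghij', 4)))  # [b'abcd', b'efgh', b'ij']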
918
eandersson/amqpstorm
amqpstorm/basic.py
Basic._get_content_body
def _get_content_body(self, message_uuid, body_size):
    """Get Content Body using RPC requests.

    :param str message_uuid: Rpc Identifier.
    :param int body_size: Content Size.

    :rtype: str
    """
    body = bytes()
    while len(body) < body_size:
        body_piece = self._channel.rpc.get_request(message_uuid, raw=True,
                                                   multiple=True)
        if not body_piece.value:
            break
        body += body_piece.value
    return body
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L413-L428
919
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0.on_frame
def on_frame(self, frame_in):
    """Handle frames sent to Channel0.

    :param frame_in: Amqp frame.
    :return:
    """
    LOGGER.debug('Frame Received: %s', frame_in.name)
    if frame_in.name == 'Heartbeat':
        return
    elif frame_in.name == 'Connection.Close':
        self._close_connection(frame_in)
    elif frame_in.name == 'Connection.CloseOk':
        self._close_connection_ok()
    elif frame_in.name == 'Connection.Blocked':
        self._blocked_connection(frame_in)
    elif frame_in.name == 'Connection.Unblocked':
        self._unblocked_connection()
    elif frame_in.name == 'Connection.OpenOk':
        self._set_connection_state(Stateful.OPEN)
    elif frame_in.name == 'Connection.Start':
        self.server_properties = frame_in.server_properties
        self._send_start_ok(frame_in)
    elif frame_in.name == 'Connection.Tune':
        self._send_tune_ok(frame_in)
        self._send_open_connection()
    else:
        LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L33-L59
920
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._close_connection
def _close_connection(self, frame_in):
    """Connection Close.

    :param specification.Connection.Close frame_in: Amqp frame.

    :return:
    """
    self._set_connection_state(Stateful.CLOSED)
    if frame_in.reply_code != 200:
        reply_text = try_utf8_decode(frame_in.reply_text)
        message = (
            'Connection was closed by remote server: %s' % reply_text
        )
        exception = AMQPConnectionError(message,
                                        reply_code=frame_in.reply_code)
        self._connection.exceptions.append(exception)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L77-L91
921
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._blocked_connection
def _blocked_connection(self, frame_in):
    """Connection is Blocked.

    :param frame_in:
    :return:
    """
    self.is_blocked = True
    LOGGER.warning(
        'Connection is blocked by remote server: %s',
        try_utf8_decode(frame_in.reason)
    )
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L100-L110
922
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_start_ok
def _send_start_ok(self, frame_in):
    """Send Start OK frame.

    :param specification.Connection.Start frame_in: Amqp frame.
    :return:
    """
    mechanisms = try_utf8_decode(frame_in.mechanisms)
    if 'EXTERNAL' in mechanisms:
        mechanism = 'EXTERNAL'
        credentials = '\0\0'
    elif 'PLAIN' in mechanisms:
        mechanism = 'PLAIN'
        credentials = self._plain_credentials()
    else:
        exception = AMQPConnectionError(
            'Unsupported Security Mechanism(s): %s' %
            frame_in.mechanisms
        )
        self._connection.exceptions.append(exception)
        return
    start_ok_frame = specification.Connection.StartOk(
        mechanism=mechanism,
        client_properties=self._client_properties(),
        response=credentials,
        locale=LOCALE
    )
    self._write_frame(start_ok_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L140-L166
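For the PLAIN mechanism chosen above, the SASL response is conventionally the NUL-separated authorization id, username, and password. The _plain_credentials helper is not shown in this section; a sketch of what it presumably produces:

def plain_credentials(username, password):
    # SASL PLAIN layout: [authzid] NUL authcid NUL passwd; authzid left empty.
    return '\0%s\0%s' % (username, password)

print(repr(plain_credentials('guest', 'guest')))  # '\x00guest\x00guest'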
923
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_tune_ok
def _send_tune_ok(self, frame_in):
    """Send Tune OK frame.

    :param specification.Connection.Tune frame_in: Tune frame.
    :return:
    """
    self.max_allowed_channels = self._negotiate(frame_in.channel_max,
                                                MAX_CHANNELS)
    self.max_frame_size = self._negotiate(frame_in.frame_max,
                                          MAX_FRAME_SIZE)
    LOGGER.debug(
        'Negotiated max frame size %d, max channels %d',
        self.max_frame_size, self.max_allowed_channels
    )
    tune_ok_frame = specification.Connection.TuneOk(
        channel_max=self.max_allowed_channels,
        frame_max=self.max_frame_size,
        heartbeat=self._heartbeat)
    self._write_frame(tune_ok_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L168-L188
924
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_open_connection
def _send_open_connection(self):
    """Send Open Connection frame.

    :return:
    """
    open_frame = specification.Connection.Open(
        virtual_host=self._parameters['virtual_host']
    )
    self._write_frame(open_frame)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L190-L198
925
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._write_frame
def _write_frame(self, frame_out):
    """Write a pamqp frame from Channel0.

    :param frame_out: Amqp frame.
    :return:
    """
    self._connection.write_frame(0, frame_out)
    LOGGER.debug('Frame Sent: %s', frame_out.name)
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L208-L215
926
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._client_properties
def _client_properties():
    """AMQPStorm Client Properties.

    :rtype: dict
    """
    return {
        'product': 'AMQPStorm',
        'platform': 'Python %s (%s)' % (platform.python_version(),
                                        platform.python_implementation()),
        'capabilities': {
            'basic.nack': True,
            'connection.blocked': True,
            'publisher_confirms': True,
            'consumer_cancel_notify': True,
            'authentication_failure_close': True,
        },
        'information': 'See https://github.com/eandersson/amqpstorm',
        'version': __version__
    }
python
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L218-L236
927
eandersson/amqpstorm
amqpstorm/channel.py
Channel.build_inbound_messages
def build_inbound_messages(self, break_on_empty=False, to_tuple=False,
                           auto_decode=True):
    """Build messages in the inbound queue.

    :param bool break_on_empty: Should we break the loop when there are
        no more messages in our inbound queue. This does not guarantee
        that the upstream queue is empty, as it's possible that if
        messages are consumed faster than delivered, the inbound queue
        will then be emptied and the consumption will be broken.
    :param bool to_tuple: Should incoming messages be converted to a
        tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: :py:class:`generator`
    """
    self.check_for_errors()
    while not self.is_closed:
        message = self._build_message(auto_decode=auto_decode)
        if not message:
            self.check_for_errors()
            sleep(IDLE_WAIT)
            if break_on_empty and not self._inbound:
                break
            continue
        if to_tuple:
            yield message.to_tuple()
            continue
        yield message
python
def build_inbound_messages(self, break_on_empty=False, to_tuple=False, auto_decode=True): self.check_for_errors() while not self.is_closed: message = self._build_message(auto_decode=auto_decode) if not message: self.check_for_errors() sleep(IDLE_WAIT) if break_on_empty and not self._inbound: break continue if to_tuple: yield message.to_tuple() continue yield message
[ "def", "build_inbound_messages", "(", "self", ",", "break_on_empty", "=", "False", ",", "to_tuple", "=", "False", ",", "auto_decode", "=", "True", ")", ":", "self", ".", "check_for_errors", "(", ")", "while", "not", "self", ".", "is_closed", ":", "message", "=", "self", ".", "_build_message", "(", "auto_decode", "=", "auto_decode", ")", "if", "not", "message", ":", "self", ".", "check_for_errors", "(", ")", "sleep", "(", "IDLE_WAIT", ")", "if", "break_on_empty", "and", "not", "self", ".", "_inbound", ":", "break", "continue", "if", "to_tuple", ":", "yield", "message", ".", "to_tuple", "(", ")", "continue", "yield", "message" ]
Build messages in the inbound queue. :param bool break_on_empty: Break the loop when there are no more messages in our inbound queue. This does not guarantee that the upstream queue is empty; if messages are consumed faster than they are delivered, the inbound queue may be emptied and consumption will stop early. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: :py:class:`generator`
[ "Build", "messages", "in", "the", "inbound", "queue", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L97-L131
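A minimal usage sketch for build_inbound_messages, assuming a broker on localhost with default credentials; the queue name is illustrative, not taken from the record above:

import amqpstorm

connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()
channel.basic.consume(queue='hypothetical_queue', no_ack=False)

# Drain whatever is currently buffered locally; break_on_empty=True stops
# the generator once the local inbound queue runs dry.
for message in channel.build_inbound_messages(break_on_empty=True):
    print(message.body)
    message.ack()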
928
eandersson/amqpstorm
amqpstorm/channel.py
Channel.check_for_errors
def check_for_errors(self): """Check connection and channel for errors. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ try: self._connection.check_for_errors() except AMQPConnectionError: self.set_state(self.CLOSED) raise if self.exceptions: exception = self.exceptions[0] if self.is_open: self.exceptions.pop(0) raise exception if self.is_closed: raise AMQPChannelError('channel was closed')
python
def check_for_errors(self): try: self._connection.check_for_errors() except AMQPConnectionError: self.set_state(self.CLOSED) raise if self.exceptions: exception = self.exceptions[0] if self.is_open: self.exceptions.pop(0) raise exception if self.is_closed: raise AMQPChannelError('channel was closed')
[ "def", "check_for_errors", "(", "self", ")", ":", "try", ":", "self", ".", "_connection", ".", "check_for_errors", "(", ")", "except", "AMQPConnectionError", ":", "self", ".", "set_state", "(", "self", ".", "CLOSED", ")", "raise", "if", "self", ".", "exceptions", ":", "exception", "=", "self", ".", "exceptions", "[", "0", "]", "if", "self", ".", "is_open", ":", "self", ".", "exceptions", ".", "pop", "(", "0", ")", "raise", "exception", "if", "self", ".", "is_closed", ":", "raise", "AMQPChannelError", "(", "'channel was closed'", ")" ]
Check connection and channel for errors. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Check", "connection", "and", "channel", "for", "errors", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L174-L195
929
eandersson/amqpstorm
amqpstorm/channel.py
Channel.confirm_deliveries
def confirm_deliveries(self): """Set the channel to confirm that each message has been successfully delivered. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ self._confirming_deliveries = True confirm_frame = specification.Confirm.Select() return self.rpc_request(confirm_frame)
python
def confirm_deliveries(self): self._confirming_deliveries = True confirm_frame = specification.Confirm.Select() return self.rpc_request(confirm_frame)
[ "def", "confirm_deliveries", "(", "self", ")", ":", "self", ".", "_confirming_deliveries", "=", "True", "confirm_frame", "=", "specification", ".", "Confirm", ".", "Select", "(", ")", "return", "self", ".", "rpc_request", "(", "confirm_frame", ")" ]
Set the channel to confirm that each message has been successfully delivered. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Set", "the", "channel", "to", "confirm", "that", "each", "message", "has", "been", "successfully", "delivered", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L197-L209
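A short sketch of publisher confirms, reusing the connection and channel from the sketch above (the routing key is hypothetical):

channel.confirm_deliveries()  # sends Confirm.Select on this channel

# With confirms enabled, basic.publish waits for the broker to
# acknowledge the message and returns a truthy result on success.
delivered = channel.basic.publish(body='hello',
                                  routing_key='hypothetical_queue')
print('confirmed' if delivered else 'not confirmed')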
930
eandersson/amqpstorm
amqpstorm/channel.py
Channel.on_frame
def on_frame(self, frame_in): """Handle frame sent to this specific channel. :param pamqp.Frame frame_in: Amqp frame. :return: """ if self.rpc.on_frame(frame_in): return if frame_in.name in CONTENT_FRAME: self._inbound.append(frame_in) elif frame_in.name == 'Basic.Cancel': self._basic_cancel(frame_in) elif frame_in.name == 'Basic.CancelOk': self.remove_consumer_tag(frame_in.consumer_tag) elif frame_in.name == 'Basic.ConsumeOk': self.add_consumer_tag(frame_in['consumer_tag']) elif frame_in.name == 'Basic.Return': self._basic_return(frame_in) elif frame_in.name == 'Channel.Close': self._close_channel(frame_in) elif frame_in.name == 'Channel.Flow': self.write_frame(specification.Channel.FlowOk(frame_in.active)) else: LOGGER.error( '[Channel%d] Unhandled Frame: %s -- %s', self.channel_id, frame_in.name, dict(frame_in) )
python
def on_frame(self, frame_in): if self.rpc.on_frame(frame_in): return if frame_in.name in CONTENT_FRAME: self._inbound.append(frame_in) elif frame_in.name == 'Basic.Cancel': self._basic_cancel(frame_in) elif frame_in.name == 'Basic.CancelOk': self.remove_consumer_tag(frame_in.consumer_tag) elif frame_in.name == 'Basic.ConsumeOk': self.add_consumer_tag(frame_in['consumer_tag']) elif frame_in.name == 'Basic.Return': self._basic_return(frame_in) elif frame_in.name == 'Channel.Close': self._close_channel(frame_in) elif frame_in.name == 'Channel.Flow': self.write_frame(specification.Channel.FlowOk(frame_in.active)) else: LOGGER.error( '[Channel%d] Unhandled Frame: %s -- %s', self.channel_id, frame_in.name, dict(frame_in) )
[ "def", "on_frame", "(", "self", ",", "frame_in", ")", ":", "if", "self", ".", "rpc", ".", "on_frame", "(", "frame_in", ")", ":", "return", "if", "frame_in", ".", "name", "in", "CONTENT_FRAME", ":", "self", ".", "_inbound", ".", "append", "(", "frame_in", ")", "elif", "frame_in", ".", "name", "==", "'Basic.Cancel'", ":", "self", ".", "_basic_cancel", "(", "frame_in", ")", "elif", "frame_in", ".", "name", "==", "'Basic.CancelOk'", ":", "self", ".", "remove_consumer_tag", "(", "frame_in", ".", "consumer_tag", ")", "elif", "frame_in", ".", "name", "==", "'Basic.ConsumeOk'", ":", "self", ".", "add_consumer_tag", "(", "frame_in", "[", "'consumer_tag'", "]", ")", "elif", "frame_in", ".", "name", "==", "'Basic.Return'", ":", "self", ".", "_basic_return", "(", "frame_in", ")", "elif", "frame_in", ".", "name", "==", "'Channel.Close'", ":", "self", ".", "_close_channel", "(", "frame_in", ")", "elif", "frame_in", ".", "name", "==", "'Channel.Flow'", ":", "self", ".", "write_frame", "(", "specification", ".", "Channel", ".", "FlowOk", "(", "frame_in", ".", "active", ")", ")", "else", ":", "LOGGER", ".", "error", "(", "'[Channel%d] Unhandled Frame: %s -- %s'", ",", "self", ".", "channel_id", ",", "frame_in", ".", "name", ",", "dict", "(", "frame_in", ")", ")" ]
Handle frame sent to this specific channel. :param pamqp.Frame frame_in: Amqp frame. :return:
[ "Handle", "frame", "sent", "to", "this", "specific", "channel", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L219-L246
931
eandersson/amqpstorm
amqpstorm/channel.py
Channel.process_data_events
def process_data_events(self, to_tuple=False, auto_decode=True): """Consume inbound messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ if not self._consumer_callbacks: raise AMQPChannelError('no consumer callback defined') for message in self.build_inbound_messages(break_on_empty=True, auto_decode=auto_decode): consumer_tag = message._method.get('consumer_tag') if to_tuple: # noinspection PyCallingNonCallable self._consumer_callbacks[consumer_tag](*message.to_tuple()) continue # noinspection PyCallingNonCallable self._consumer_callbacks[consumer_tag](message)
python
def process_data_events(self, to_tuple=False, auto_decode=True): if not self._consumer_callbacks: raise AMQPChannelError('no consumer callback defined') for message in self.build_inbound_messages(break_on_empty=True, auto_decode=auto_decode): consumer_tag = message._method.get('consumer_tag') if to_tuple: # noinspection PyCallingNonCallable self._consumer_callbacks[consumer_tag](*message.to_tuple()) continue # noinspection PyCallingNonCallable self._consumer_callbacks[consumer_tag](message)
[ "def", "process_data_events", "(", "self", ",", "to_tuple", "=", "False", ",", "auto_decode", "=", "True", ")", ":", "if", "not", "self", ".", "_consumer_callbacks", ":", "raise", "AMQPChannelError", "(", "'no consumer callback defined'", ")", "for", "message", "in", "self", ".", "build_inbound_messages", "(", "break_on_empty", "=", "True", ",", "auto_decode", "=", "auto_decode", ")", ":", "consumer_tag", "=", "message", ".", "_method", ".", "get", "(", "'consumer_tag'", ")", "if", "to_tuple", ":", "# noinspection PyCallingNonCallable", "self", ".", "_consumer_callbacks", "[", "consumer_tag", "]", "(", "*", "message", ".", "to_tuple", "(", ")", ")", "continue", "# noinspection PyCallingNonCallable", "self", ".", "_consumer_callbacks", "[", "consumer_tag", "]", "(", "message", ")" ]
Consume inbound messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Consume", "inbound", "messages", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L259-L282
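process_data_events dispatches buffered messages to previously registered consumer callbacks; a minimal sketch, reusing the channel from the earlier sketch (queue name hypothetical):

def on_message(message):
    print(message.body)
    message.ack()

channel.basic.consume(callback=on_message, queue='hypothetical_queue')

# Unlike start_consuming, this processes only what is currently buffered
# and then returns, so it can be driven from an external loop.
channel.process_data_events()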
932
eandersson/amqpstorm
amqpstorm/channel.py
Channel.rpc_request
def rpc_request(self, frame_out, connection_adapter=None):
    """Perform an RPC Request.

    :param specification.Frame frame_out: Amqp frame.
    :rtype: dict
    """
    with self.rpc.lock:
        uuid = self.rpc.register_request(frame_out.valid_responses)
        self._connection.write_frame(self.channel_id, frame_out)
        return self.rpc.get_request(
            uuid, connection_adapter=connection_adapter
        )
python
def rpc_request(self, frame_out, connection_adapter=None): with self.rpc.lock: uuid = self.rpc.register_request(frame_out.valid_responses) self._connection.write_frame(self.channel_id, frame_out) return self.rpc.get_request( uuid, connection_adapter=connection_adapter )
[ "def", "rpc_request", "(", "self", ",", "frame_out", ",", "connection_adapter", "=", "None", ")", ":", "with", "self", ".", "rpc", ".", "lock", ":", "uuid", "=", "self", ".", "rpc", ".", "register_request", "(", "frame_out", ".", "valid_responses", ")", "self", ".", "_connection", ".", "write_frame", "(", "self", ".", "channel_id", ",", "frame_out", ")", "return", "self", ".", "rpc", ".", "get_request", "(", "uuid", ",", "connection_adapter", "=", "connection_adapter", ")" ]
Perform an RPC Request. :param specification.Frame frame_out: Amqp frame. :rtype: dict
[ "Perform", "a", "RPC", "Request", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L284-L295
933
eandersson/amqpstorm
amqpstorm/channel.py
Channel.start_consuming
def start_consuming(self, to_tuple=False, auto_decode=True): """Start consuming messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ while not self.is_closed: self.process_data_events( to_tuple=to_tuple, auto_decode=auto_decode ) if self.consumer_tags: sleep(IDLE_WAIT) continue break
python
def start_consuming(self, to_tuple=False, auto_decode=True): while not self.is_closed: self.process_data_events( to_tuple=to_tuple, auto_decode=auto_decode ) if self.consumer_tags: sleep(IDLE_WAIT) continue break
[ "def", "start_consuming", "(", "self", ",", "to_tuple", "=", "False", ",", "auto_decode", "=", "True", ")", ":", "while", "not", "self", ".", "is_closed", ":", "self", ".", "process_data_events", "(", "to_tuple", "=", "to_tuple", ",", "auto_decode", "=", "auto_decode", ")", "if", "self", ".", "consumer_tags", ":", "sleep", "(", "IDLE_WAIT", ")", "continue", "break" ]
Start consuming messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Start", "consuming", "messages", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L297-L318
934
eandersson/amqpstorm
amqpstorm/channel.py
Channel.stop_consuming
def stop_consuming(self): """Stop consuming messages. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ if not self.consumer_tags: return if not self.is_closed: for tag in self.consumer_tags: self.basic.cancel(tag) self.remove_consumer_tag()
python
def stop_consuming(self): if not self.consumer_tags: return if not self.is_closed: for tag in self.consumer_tags: self.basic.cancel(tag) self.remove_consumer_tag()
[ "def", "stop_consuming", "(", "self", ")", ":", "if", "not", "self", ".", "consumer_tags", ":", "return", "if", "not", "self", ".", "is_closed", ":", "for", "tag", "in", "self", ".", "consumer_tags", ":", "self", ".", "basic", ".", "cancel", "(", "tag", ")", "self", ".", "remove_consumer_tag", "(", ")" ]
Stop consuming messages. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Stop", "consuming", "messages", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L320-L334
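A sketch tying start_consuming and stop_consuming together, again reusing the channel from the earlier sketch (queue name and stop message are hypothetical):

def on_message(message):
    message.ack()
    if message.body == 'stop':
        # Cancelling our consumer tags makes start_consuming fall through.
        channel.stop_consuming()

channel.basic.consume(callback=on_message, queue='hypothetical_queue')
channel.start_consuming()  # blocks until the tags are cancelled or the channel closes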
935
eandersson/amqpstorm
amqpstorm/channel.py
Channel.write_frame
def write_frame(self, frame_out): """Write a pamqp frame from the current channel. :param specification.Frame frame_out: A single pamqp frame. :return: """ self.check_for_errors() self._connection.write_frame(self.channel_id, frame_out)
python
def write_frame(self, frame_out): self.check_for_errors() self._connection.write_frame(self.channel_id, frame_out)
[ "def", "write_frame", "(", "self", ",", "frame_out", ")", ":", "self", ".", "check_for_errors", "(", ")", "self", ".", "_connection", ".", "write_frame", "(", "self", ".", "channel_id", ",", "frame_out", ")" ]
Write a pamqp frame from the current channel. :param specification.Frame frame_out: A single pamqp frame. :return:
[ "Write", "a", "pamqp", "frame", "from", "the", "current", "channel", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L336-L344
936
eandersson/amqpstorm
amqpstorm/channel.py
Channel.write_frames
def write_frames(self, frames_out): """Write multiple pamqp frames from the current channel. :param list frames_out: A list of pamqp frames. :return: """ self.check_for_errors() self._connection.write_frames(self.channel_id, frames_out)
python
def write_frames(self, frames_out): self.check_for_errors() self._connection.write_frames(self.channel_id, frames_out)
[ "def", "write_frames", "(", "self", ",", "frames_out", ")", ":", "self", ".", "check_for_errors", "(", ")", "self", ".", "_connection", ".", "write_frames", "(", "self", ".", "channel_id", ",", "frames_out", ")" ]
Write multiple pamqp frames from the current channel. :param list frames_out: A list of pamqp frames. :return:
[ "Write", "multiple", "pamqp", "frames", "from", "the", "current", "channel", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L346-L354
937
eandersson/amqpstorm
amqpstorm/channel.py
Channel._basic_cancel
def _basic_cancel(self, frame_in): """Handle a Basic Cancel frame. :param specification.Basic.Cancel frame_in: Amqp frame. :return: """ LOGGER.warning( 'Received Basic.Cancel on consumer_tag: %s', try_utf8_decode(frame_in.consumer_tag) ) self.remove_consumer_tag(frame_in.consumer_tag)
python
def _basic_cancel(self, frame_in): LOGGER.warning( 'Received Basic.Cancel on consumer_tag: %s', try_utf8_decode(frame_in.consumer_tag) ) self.remove_consumer_tag(frame_in.consumer_tag)
[ "def", "_basic_cancel", "(", "self", ",", "frame_in", ")", ":", "LOGGER", ".", "warning", "(", "'Received Basic.Cancel on consumer_tag: %s'", ",", "try_utf8_decode", "(", "frame_in", ".", "consumer_tag", ")", ")", "self", ".", "remove_consumer_tag", "(", "frame_in", ".", "consumer_tag", ")" ]
Handle a Basic Cancel frame. :param specification.Basic.Cancel frame_in: Amqp frame. :return:
[ "Handle", "a", "Basic", "Cancel", "frame", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L356-L367
938
eandersson/amqpstorm
amqpstorm/channel.py
Channel._basic_return
def _basic_return(self, frame_in): """Handle a Basic Return Frame and treat it as an error. :param specification.Basic.Return frame_in: Amqp frame. :return: """ reply_text = try_utf8_decode(frame_in.reply_text) message = ( "Message not delivered: %s (%s) to queue '%s' from exchange '%s'" % ( reply_text, frame_in.reply_code, frame_in.routing_key, frame_in.exchange ) ) exception = AMQPMessageError(message, reply_code=frame_in.reply_code) self.exceptions.append(exception)
python
def _basic_return(self, frame_in): reply_text = try_utf8_decode(frame_in.reply_text) message = ( "Message not delivered: %s (%s) to queue '%s' from exchange '%s'" % ( reply_text, frame_in.reply_code, frame_in.routing_key, frame_in.exchange ) ) exception = AMQPMessageError(message, reply_code=frame_in.reply_code) self.exceptions.append(exception)
[ "def", "_basic_return", "(", "self", ",", "frame_in", ")", ":", "reply_text", "=", "try_utf8_decode", "(", "frame_in", ".", "reply_text", ")", "message", "=", "(", "\"Message not delivered: %s (%s) to queue '%s' from exchange '%s'\"", "%", "(", "reply_text", ",", "frame_in", ".", "reply_code", ",", "frame_in", ".", "routing_key", ",", "frame_in", ".", "exchange", ")", ")", "exception", "=", "AMQPMessageError", "(", "message", ",", "reply_code", "=", "frame_in", ".", "reply_code", ")", "self", ".", "exceptions", ".", "append", "(", "exception", ")" ]
Handle a Basic Return Frame and treat it as an error. :param specification.Basic.Return frame_in: Amqp frame. :return:
[ "Handle", "a", "Basic", "Return", "Frame", "and", "treat", "it", "as", "an", "error", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L369-L388
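A Basic.Return is queued as an AMQPMessageError and raised on the next error check; a sketch that may provoke one with an unroutable mandatory publish (the routing key is made up, and this assumes confirm_deliveries has been enabled so publish checks for errors before returning):

from amqpstorm import AMQPMessageError

channel.confirm_deliveries()
try:
    channel.basic.publish(body='hello',
                          routing_key='no.such.queue',
                          mandatory=True)
except AMQPMessageError as why:
    print('message returned:', why)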
939
eandersson/amqpstorm
amqpstorm/channel.py
Channel._build_message
def _build_message(self, auto_decode): """Fetch and build a complete Message from the inbound queue. :param bool auto_decode: Auto-decode strings when possible. :rtype: Message """ with self.lock: if len(self._inbound) < 2: return None headers = self._build_message_headers() if not headers: return None basic_deliver, content_header = headers body = self._build_message_body(content_header.body_size) message = Message(channel=self, body=body, method=dict(basic_deliver), properties=dict(content_header.properties), auto_decode=auto_decode) return message
python
def _build_message(self, auto_decode): with self.lock: if len(self._inbound) < 2: return None headers = self._build_message_headers() if not headers: return None basic_deliver, content_header = headers body = self._build_message_body(content_header.body_size) message = Message(channel=self, body=body, method=dict(basic_deliver), properties=dict(content_header.properties), auto_decode=auto_decode) return message
[ "def", "_build_message", "(", "self", ",", "auto_decode", ")", ":", "with", "self", ".", "lock", ":", "if", "len", "(", "self", ".", "_inbound", ")", "<", "2", ":", "return", "None", "headers", "=", "self", ".", "_build_message_headers", "(", ")", "if", "not", "headers", ":", "return", "None", "basic_deliver", ",", "content_header", "=", "headers", "body", "=", "self", ".", "_build_message_body", "(", "content_header", ".", "body_size", ")", "message", "=", "Message", "(", "channel", "=", "self", ",", "body", "=", "body", ",", "method", "=", "dict", "(", "basic_deliver", ")", ",", "properties", "=", "dict", "(", "content_header", ".", "properties", ")", ",", "auto_decode", "=", "auto_decode", ")", "return", "message" ]
Fetch and build a complete Message from the inbound queue. :param bool auto_decode: Auto-decode strings when possible. :rtype: Message
[ "Fetch", "and", "build", "a", "complete", "Message", "from", "the", "inbound", "queue", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L390-L411
940
eandersson/amqpstorm
amqpstorm/channel.py
Channel._build_message_body
def _build_message_body(self, body_size):
    """Build the Message body from the inbound queue.

    :rtype: bytes
    """
    body = bytes()
    while len(body) < body_size:
        if not self._inbound:
            self.check_for_errors()
            sleep(IDLE_WAIT)
            continue
        body_piece = self._inbound.pop(0)
        if not body_piece.value:
            break
        body += body_piece.value
    return body
python
def _build_message_body(self, body_size): body = bytes() while len(body) < body_size: if not self._inbound: self.check_for_errors() sleep(IDLE_WAIT) continue body_piece = self._inbound.pop(0) if not body_piece.value: break body += body_piece.value return body
[ "def", "_build_message_body", "(", "self", ",", "body_size", ")", ":", "body", "=", "bytes", "(", ")", "while", "len", "(", "body", ")", "<", "body_size", ":", "if", "not", "self", ".", "_inbound", ":", "self", ".", "check_for_errors", "(", ")", "sleep", "(", "IDLE_WAIT", ")", "continue", "body_piece", "=", "self", ".", "_inbound", ".", "pop", "(", "0", ")", "if", "not", "body_piece", ".", "value", ":", "break", "body", "+=", "body_piece", ".", "value", "return", "body" ]
Build the Message body from the inbound queue. :rtype: bytes
[ "Build", "the", "Message", "body", "from", "the", "inbound", "queue", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L437-L452
941
eandersson/amqpstorm
amqpstorm/management/user.py
User.create
def create(self, username, password, tags=''):
    """Create User.

    :param str username: Username
    :param str password: Password
    :param str tags: Comma-separated list of tags (e.g. monitoring)

    :rtype: None
    """
    user_payload = json.dumps({
        'password': password,
        'tags': tags
    })
    return self.http_client.put(API_USER % username,
                                payload=user_payload)
python
def create(self, username, password, tags=''): user_payload = json.dumps({ 'password': password, 'tags': tags }) return self.http_client.put(API_USER % username, payload=user_payload)
[ "def", "create", "(", "self", ",", "username", ",", "password", ",", "tags", "=", "''", ")", ":", "user_payload", "=", "json", ".", "dumps", "(", "{", "'password'", ":", "password", ",", "'tags'", ":", "tags", "}", ")", "return", "self", ".", "http_client", ".", "put", "(", "API_USER", "%", "username", ",", "payload", "=", "user_payload", ")" ]
Create User. :param str username: Username :param str password: Password :param str tags: Comma-separated list of tags (e.g. monitoring) :rtype: None
[ "Create", "User", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L28-L42
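A minimal sketch against the management API, assuming a broker with the management plugin listening on localhost:15672 (credentials and user name are illustrative):

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
api.user.create('example_user', 'example_password', tags='monitoring')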
942
eandersson/amqpstorm
amqpstorm/management/user.py
User.get_permission
def get_permission(self, username, virtual_host):
    """Get User permissions for the given virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS %
                                (
                                    virtual_host,
                                    username
                                ))
python
def get_permission(self, username, virtual_host): virtual_host = quote(virtual_host, '') return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS % ( virtual_host, username ))
[ "def", "get_permission", "(", "self", ",", "username", ",", "virtual_host", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "return", "self", ".", "http_client", ".", "get", "(", "API_USER_VIRTUAL_HOST_PERMISSIONS", "%", "(", "virtual_host", ",", "username", ")", ")" ]
Get User permissions for the given virtual host. :param str username: Username :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Get", "User", "permissions", "for", "the", "configured", "virtual", "host", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L53-L69
943
eandersson/amqpstorm
amqpstorm/management/user.py
User.set_permission
def set_permission(self, username, virtual_host, configure_regex='.*',
                   write_regex='.*', read_regex='.*'):
    """Set User permissions for the given virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name
    :param str configure_regex: Permission pattern for configuration
                                operations for this user.
    :param str write_regex: Permission pattern for write operations
                            for this user.
    :param str read_regex: Permission pattern for read operations
                           for this user.

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    permission_payload = json.dumps({
        "configure": configure_regex,
        "read": read_regex,
        "write": write_regex
    })
    return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS %
                                (
                                    virtual_host,
                                    username
                                ),
                                payload=permission_payload)
python
def set_permission(self, username, virtual_host, configure_regex='.*', write_regex='.*', read_regex='.*'): virtual_host = quote(virtual_host, '') permission_payload = json.dumps({ "configure": configure_regex, "read": read_regex, "write": write_regex }) return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS % ( virtual_host, username ), payload=permission_payload)
[ "def", "set_permission", "(", "self", ",", "username", ",", "virtual_host", ",", "configure_regex", "=", "'.*'", ",", "write_regex", "=", "'.*'", ",", "read_regex", "=", "'.*'", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "permission_payload", "=", "json", ".", "dumps", "(", "{", "\"configure\"", ":", "configure_regex", ",", "\"read\"", ":", "read_regex", ",", "\"write\"", ":", "write_regex", "}", ")", "return", "self", ".", "http_client", ".", "put", "(", "API_USER_VIRTUAL_HOST_PERMISSIONS", "%", "(", "virtual_host", ",", "username", ")", ",", "payload", "=", "permission_payload", ")" ]
Set User permissions for the given virtual host. :param str username: Username :param str virtual_host: Virtual host name :param str configure_regex: Permission pattern for configuration operations for this user. :param str write_regex: Permission pattern for write operations for this user. :param str read_regex: Permission pattern for read operations for this user. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Set", "User", "permissions", "for", "the", "configured", "virtual", "host", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L86-L115
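Continuing the sketch above, granting the hypothetical user full access to the default virtual host and reading the result back:

api.user.set_permission('example_user', '/',
                        configure_regex='.*',
                        write_regex='.*',
                        read_regex='.*')
print(api.user.get_permission('example_user', '/'))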
944
eandersson/amqpstorm
amqpstorm/management/user.py
User.delete_permission
def delete_permission(self, username, virtual_host):
    """Delete User permissions for the given virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    return self.http_client.delete(
        API_USER_VIRTUAL_HOST_PERMISSIONS %
        (
            virtual_host,
            username
        ))
python
def delete_permission(self, username, virtual_host): virtual_host = quote(virtual_host, '') return self.http_client.delete( API_USER_VIRTUAL_HOST_PERMISSIONS % ( virtual_host, username ))
[ "def", "delete_permission", "(", "self", ",", "username", ",", "virtual_host", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "return", "self", ".", "http_client", ".", "delete", "(", "API_USER_VIRTUAL_HOST_PERMISSIONS", "%", "(", "virtual_host", ",", "username", ")", ")" ]
Delete User permissions for the given virtual host. :param str username: Username :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Delete", "User", "permissions", "for", "the", "configured", "virtual", "host", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L117-L134
945
eandersson/amqpstorm
examples/scalable_rpc_server.py
ScalableRpcServer.start_server
def start_server(self): """Start the RPC Server. :return: """ self._stopped.clear() if not self._connection or self._connection.is_closed: self._create_connection() while not self._stopped.is_set(): try: # Check our connection for errors. self._connection.check_for_errors() self._update_consumers() except amqpstorm.AMQPError as why: # If an error occurs, re-connect and let update_consumers # re-open the channels. LOGGER.warning(why) self._stop_consumers() self._create_connection() time.sleep(1)
python
def start_server(self): self._stopped.clear() if not self._connection or self._connection.is_closed: self._create_connection() while not self._stopped.is_set(): try: # Check our connection for errors. self._connection.check_for_errors() self._update_consumers() except amqpstorm.AMQPError as why: # If an error occurs, re-connect and let update_consumers # re-open the channels. LOGGER.warning(why) self._stop_consumers() self._create_connection() time.sleep(1)
[ "def", "start_server", "(", "self", ")", ":", "self", ".", "_stopped", ".", "clear", "(", ")", "if", "not", "self", ".", "_connection", "or", "self", ".", "_connection", ".", "is_closed", ":", "self", ".", "_create_connection", "(", ")", "while", "not", "self", ".", "_stopped", ".", "is_set", "(", ")", ":", "try", ":", "# Check our connection for errors.", "self", ".", "_connection", ".", "check_for_errors", "(", ")", "self", ".", "_update_consumers", "(", ")", "except", "amqpstorm", ".", "AMQPError", "as", "why", ":", "# If an error occurs, re-connect and let update_consumers", "# re-open the channels.", "LOGGER", ".", "warning", "(", "why", ")", "self", ".", "_stop_consumers", "(", ")", "self", ".", "_create_connection", "(", ")", "time", ".", "sleep", "(", "1", ")" ]
Start the RPC Server. :return:
[ "Start", "the", "RPC", "Server", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/scalable_rpc_server.py#L41-L60
946
eandersson/amqpstorm
examples/scalable_rpc_server.py
ScalableRpcServer._update_consumers
def _update_consumers(self): """Update Consumers. - Add more if requested. - Make sure the consumers are healthy. - Remove excess consumers. :return: """ # Do we need to start more consumers. consumer_to_start = \ min(max(self.number_of_consumers - len(self._consumers), 0), 2) for _ in range(consumer_to_start): consumer = Consumer(self.rpc_queue) self._start_consumer(consumer) self._consumers.append(consumer) # Check that all our consumers are active. for consumer in self._consumers: if consumer.active: continue self._start_consumer(consumer) break # Do we have any overflow of consumers. self._stop_consumers(self.number_of_consumers)
python
def _update_consumers(self): # Do we need to start more consumers. consumer_to_start = \ min(max(self.number_of_consumers - len(self._consumers), 0), 2) for _ in range(consumer_to_start): consumer = Consumer(self.rpc_queue) self._start_consumer(consumer) self._consumers.append(consumer) # Check that all our consumers are active. for consumer in self._consumers: if consumer.active: continue self._start_consumer(consumer) break # Do we have any overflow of consumers. self._stop_consumers(self.number_of_consumers)
[ "def", "_update_consumers", "(", "self", ")", ":", "# Do we need to start more consumers.", "consumer_to_start", "=", "min", "(", "max", "(", "self", ".", "number_of_consumers", "-", "len", "(", "self", ".", "_consumers", ")", ",", "0", ")", ",", "2", ")", "for", "_", "in", "range", "(", "consumer_to_start", ")", ":", "consumer", "=", "Consumer", "(", "self", ".", "rpc_queue", ")", "self", ".", "_start_consumer", "(", "consumer", ")", "self", ".", "_consumers", ".", "append", "(", "consumer", ")", "# Check that all our consumers are active.", "for", "consumer", "in", "self", ".", "_consumers", ":", "if", "consumer", ".", "active", ":", "continue", "self", ".", "_start_consumer", "(", "consumer", ")", "break", "# Do we have any overflow of consumers.", "self", ".", "_stop_consumers", "(", "self", ".", "number_of_consumers", ")" ]
Update Consumers. - Add more if requested. - Make sure the consumers are healthy. - Remove excess consumers. :return:
[ "Update", "Consumers", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/scalable_rpc_server.py#L112-L137
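The min/max clamp above starts at most two new consumers per pass, no matter how far below the target the pool has fallen; a quick check of the arithmetic:

number_of_consumers = 5
for active in (0, 1, 4, 7):
    to_start = min(max(number_of_consumers - active, 0), 2)
    print(active, '->', to_start)  # 0 -> 2, 1 -> 2, 4 -> 1, 7 -> 0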
947
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.get
def get(self, path, payload=None, headers=None): """HTTP GET operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response """ return self._request('get', path, payload, headers)
python
def get(self, path, payload=None, headers=None): return self._request('get', path, payload, headers)
[ "def", "get", "(", "self", ",", "path", ",", "payload", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "_request", "(", "'get'", ",", "path", ",", "payload", ",", "headers", ")" ]
HTTP GET operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
[ "HTTP", "GET", "operation", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L17-L29
948
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.post
def post(self, path, payload=None, headers=None): """HTTP POST operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response """ return self._request('post', path, payload, headers)
python
def post(self, path, payload=None, headers=None): return self._request('post', path, payload, headers)
[ "def", "post", "(", "self", ",", "path", ",", "payload", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "_request", "(", "'post'", ",", "path", ",", "payload", ",", "headers", ")" ]
HTTP POST operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
[ "HTTP", "POST", "operation", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L31-L43
949
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.delete
def delete(self, path, payload=None, headers=None): """HTTP DELETE operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response """ return self._request('delete', path, payload, headers)
python
def delete(self, path, payload=None, headers=None): return self._request('delete', path, payload, headers)
[ "def", "delete", "(", "self", ",", "path", ",", "payload", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "_request", "(", "'delete'", ",", "path", ",", "payload", ",", "headers", ")" ]
HTTP DELETE operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
[ "HTTP", "DELETE", "operation", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L45-L57
950
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.put
def put(self, path, payload=None, headers=None): """HTTP PUT operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response """ return self._request('put', path, payload, headers)
python
def put(self, path, payload=None, headers=None): return self._request('put', path, payload, headers)
[ "def", "put", "(", "self", ",", "path", ",", "payload", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "_request", "(", "'put'", ",", "path", ",", "payload", ",", "headers", ")" ]
HTTP PUT operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
[ "HTTP", "PUT", "operation", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L59-L71
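All four verb helpers above delegate to _request (next record). A minimal sketch, assuming the constructor takes the API base URL plus credentials; note that this client is normally reached through the higher-level ManagementApi wrapper rather than built directly:

from amqpstorm.management.http_client import HTTPClient

client = HTTPClient('http://localhost:15672/', 'guest', 'guest')
overview = client.get('overview')  # resolves to GET /api/overview
print(overview.get('rabbitmq_version'))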
951
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient._request
def _request(self, method, path, payload=None, headers=None): """HTTP operation. :param method: Operation type (e.g. post) :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response """ url = urlparse.urljoin(self._base_url, 'api/%s' % path) headers = headers or {} headers['content-type'] = 'application/json' try: response = requests.request( method, url, auth=self._auth, data=payload, headers=headers, cert=self._cert, verify=self._verify, timeout=self._timeout ) except requests.RequestException as why: raise ApiConnectionError(str(why)) json_response = self._get_json_output(response) self._check_for_errors(response, json_response) return json_response
python
def _request(self, method, path, payload=None, headers=None): url = urlparse.urljoin(self._base_url, 'api/%s' % path) headers = headers or {} headers['content-type'] = 'application/json' try: response = requests.request( method, url, auth=self._auth, data=payload, headers=headers, cert=self._cert, verify=self._verify, timeout=self._timeout ) except requests.RequestException as why: raise ApiConnectionError(str(why)) json_response = self._get_json_output(response) self._check_for_errors(response, json_response) return json_response
[ "def", "_request", "(", "self", ",", "method", ",", "path", ",", "payload", "=", "None", ",", "headers", "=", "None", ")", ":", "url", "=", "urlparse", ".", "urljoin", "(", "self", ".", "_base_url", ",", "'api/%s'", "%", "path", ")", "headers", "=", "headers", "or", "{", "}", "headers", "[", "'content-type'", "]", "=", "'application/json'", "try", ":", "response", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "auth", "=", "self", ".", "_auth", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "cert", "=", "self", ".", "_cert", ",", "verify", "=", "self", ".", "_verify", ",", "timeout", "=", "self", ".", "_timeout", ")", "except", "requests", ".", "RequestException", "as", "why", ":", "raise", "ApiConnectionError", "(", "str", "(", "why", ")", ")", "json_response", "=", "self", ".", "_get_json_output", "(", "response", ")", "self", ".", "_check_for_errors", "(", "response", ",", "json_response", ")", "return", "json_response" ]
HTTP operation. :param method: Operation type (e.g. post) :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
[ "HTTP", "operation", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L73-L104
952
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient._check_for_errors
def _check_for_errors(response, json_response): """Check payload for errors. :param response: HTTP response :param json_response: Json response :raises ApiError: Raises if the remote server encountered an error. :return: """ status_code = response.status_code try: response.raise_for_status() except requests.HTTPError as why: raise ApiError(str(why), reply_code=status_code) if isinstance(json_response, dict) and 'error' in json_response: raise ApiError(json_response['error'], reply_code=status_code)
python
def _check_for_errors(response, json_response): status_code = response.status_code try: response.raise_for_status() except requests.HTTPError as why: raise ApiError(str(why), reply_code=status_code) if isinstance(json_response, dict) and 'error' in json_response: raise ApiError(json_response['error'], reply_code=status_code)
[ "def", "_check_for_errors", "(", "response", ",", "json_response", ")", ":", "status_code", "=", "response", ".", "status_code", "try", ":", "response", ".", "raise_for_status", "(", ")", "except", "requests", ".", "HTTPError", "as", "why", ":", "raise", "ApiError", "(", "str", "(", "why", ")", ",", "reply_code", "=", "status_code", ")", "if", "isinstance", "(", "json_response", ",", "dict", ")", "and", "'error'", "in", "json_response", ":", "raise", "ApiError", "(", "json_response", "[", "'error'", "]", ",", "reply_code", "=", "status_code", ")" ]
Check payload for errors. :param response: HTTP response :param json_response: Json response :raises ApiError: Raises if the remote server encountered an error. :return:
[ "Check", "payload", "for", "errors", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L121-L137
953
eandersson/amqpstorm
amqpstorm/management/healthchecks.py
HealthChecks.get
def get(self, node=None): """Run basic healthchecks against the current node, or against a given node. Example response: > {"status":"ok"} > {"status":"failed","reason":"string"} :param node: Node name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ if not node: return self.http_client.get(HEALTHCHECKS) return self.http_client.get(HEALTHCHECKS_NODE % node)
python
def get(self, node=None): if not node: return self.http_client.get(HEALTHCHECKS) return self.http_client.get(HEALTHCHECKS_NODE % node)
[ "def", "get", "(", "self", ",", "node", "=", "None", ")", ":", "if", "not", "node", ":", "return", "self", ".", "http_client", ".", "get", "(", "HEALTHCHECKS", ")", "return", "self", ".", "http_client", ".", "get", "(", "HEALTHCHECKS_NODE", "%", "node", ")" ]
Run basic healthchecks against the current node, or against a given node. Example response: > {"status":"ok"} > {"status":"failed","reason":"string"} :param node: Node name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Run", "basic", "healthchecks", "against", "the", "current", "node", "or", "against", "a", "given", "node", "." ]
38330906c0af19eea482f43c5ce79bab98a1e064
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/healthchecks.py#L8-L25
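Continuing the management sketch, a basic health check against the current node, assuming the ManagementApi wrapper exposes this class as api.healthchecks:

result = api.healthchecks.get()
if result.get('status') != 'ok':
    print('node unhealthy:', result.get('reason'))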
954
cggh/scikit-allel
allel/stats/diversity.py
mean_pairwise_difference
def mean_pairwise_difference(ac, an=None, fill=np.nan): """Calculate for each variant the mean number of pairwise differences between chromosomes sampled from within a single population. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. an : array_like, int, shape (n_variants,), optional Allele numbers. If not provided, will be calculated from `ac`. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- mpd : ndarray, float, shape (n_variants,) Notes ----- The values returned by this function can be summed over a genome region and divided by the number of accessible bases to estimate nucleotide diversity, a.k.a. *pi*. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1]]) >>> ac = h.count_alleles() >>> allel.mean_pairwise_difference(ac) array([0. , 0.5 , 0.66666667, 0.5 , 0. , 0.83333333, 0.83333333, 1. ]) See Also -------- sequence_diversity, windowed_diversity """ # This function calculates the mean number of pairwise differences # between haplotypes within a single population, generalising to any number # of alleles. # check inputs ac = asarray_ndim(ac, 2) # total number of haplotypes if an is None: an = np.sum(ac, axis=1) else: an = asarray_ndim(an, 1) check_dim0_aligned(ac, an) # total number of pairwise comparisons for each variant: # (an choose 2) n_pairs = an * (an - 1) / 2 # number of pairwise comparisons where there is no difference: # sum of (ac choose 2) for each allele (i.e., number of ways to # choose the same allele twice) n_same = np.sum(ac * (ac - 1) / 2, axis=1) # number of pairwise differences n_diff = n_pairs - n_same # mean number of pairwise differences, accounting for cases where # there are no pairs with ignore_invalid(): mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill) return mpd
python
def mean_pairwise_difference(ac, an=None, fill=np.nan): # This function calculates the mean number of pairwise differences # between haplotypes within a single population, generalising to any number # of alleles. # check inputs ac = asarray_ndim(ac, 2) # total number of haplotypes if an is None: an = np.sum(ac, axis=1) else: an = asarray_ndim(an, 1) check_dim0_aligned(ac, an) # total number of pairwise comparisons for each variant: # (an choose 2) n_pairs = an * (an - 1) / 2 # number of pairwise comparisons where there is no difference: # sum of (ac choose 2) for each allele (i.e., number of ways to # choose the same allele twice) n_same = np.sum(ac * (ac - 1) / 2, axis=1) # number of pairwise differences n_diff = n_pairs - n_same # mean number of pairwise differences, accounting for cases where # there are no pairs with ignore_invalid(): mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill) return mpd
[ "def", "mean_pairwise_difference", "(", "ac", ",", "an", "=", "None", ",", "fill", "=", "np", ".", "nan", ")", ":", "# This function calculates the mean number of pairwise differences", "# between haplotypes within a single population, generalising to any number", "# of alleles.", "# check inputs", "ac", "=", "asarray_ndim", "(", "ac", ",", "2", ")", "# total number of haplotypes", "if", "an", "is", "None", ":", "an", "=", "np", ".", "sum", "(", "ac", ",", "axis", "=", "1", ")", "else", ":", "an", "=", "asarray_ndim", "(", "an", ",", "1", ")", "check_dim0_aligned", "(", "ac", ",", "an", ")", "# total number of pairwise comparisons for each variant:", "# (an choose 2)", "n_pairs", "=", "an", "*", "(", "an", "-", "1", ")", "/", "2", "# number of pairwise comparisons where there is no difference:", "# sum of (ac choose 2) for each allele (i.e., number of ways to", "# choose the same allele twice)", "n_same", "=", "np", ".", "sum", "(", "ac", "*", "(", "ac", "-", "1", ")", "/", "2", ",", "axis", "=", "1", ")", "# number of pairwise differences", "n_diff", "=", "n_pairs", "-", "n_same", "# mean number of pairwise differences, accounting for cases where", "# there are no pairs", "with", "ignore_invalid", "(", ")", ":", "mpd", "=", "np", ".", "where", "(", "n_pairs", ">", "0", ",", "n_diff", "/", "n_pairs", ",", "fill", ")", "return", "mpd" ]
Calculate for each variant the mean number of pairwise differences between chromosomes sampled from within a single population. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. an : array_like, int, shape (n_variants,), optional Allele numbers. If not provided, will be calculated from `ac`. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- mpd : ndarray, float, shape (n_variants,) Notes ----- The values returned by this function can be summed over a genome region and divided by the number of accessible bases to estimate nucleotide diversity, a.k.a. *pi*. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1]]) >>> ac = h.count_alleles() >>> allel.mean_pairwise_difference(ac) array([0. , 0.5 , 0.66666667, 0.5 , 0. , 0.83333333, 0.83333333, 1. ]) See Also -------- sequence_diversity, windowed_diversity
[ "Calculate", "for", "each", "variant", "the", "mean", "number", "of", "pairwise", "differences", "between", "chromosomes", "sampled", "from", "within", "a", "single", "population", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L22-L104
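The per-variant quantity computed above, restated as a formula (derived from the code, not quoted from the record): with allele number $an$ and per-allele counts $ac_i$,

\[
\mathrm{MPD} = \frac{\binom{an}{2} - \sum_i \binom{ac_i}{2}}{\binom{an}{2}}
\]

where the denominator is the number of haplotype pairs and the subtracted sum counts the pairs that carry the same allele.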
955
cggh/scikit-allel
allel/stats/diversity.py
mean_pairwise_difference_between
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None, fill=np.nan): """Calculate for each variant the mean number of pairwise differences between chromosomes sampled from two different populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. an1 : array_like, int, shape (n_variants,), optional Allele numbers for the first population. If not provided, will be calculated from `ac1`. an2 : array_like, int, shape (n_variants,), optional Allele numbers for the second population. If not provided, will be calculated from `ac2`. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- mpd : ndarray, float, shape (n_variants,) Notes ----- The values returned by this function can be summed over a genome region and divided by the number of accessible bases to estimate nucleotide divergence between two populations, a.k.a. *Dxy*. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1]]) >>> ac1 = h.count_alleles(subpop=[0, 1]) >>> ac2 = h.count_alleles(subpop=[2, 3]) >>> allel.mean_pairwise_difference_between(ac1, ac2) array([0. , 0.5 , 1. , 0.5 , 0. , 1. , 0.75, nan]) See Also -------- sequence_divergence, windowed_divergence """ # This function calculates the mean number of pairwise differences # between haplotypes from two different populations, generalising to any # number of alleles. # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # total number of haplotypes sampled from each population if an1 is None: an1 = np.sum(ac1, axis=1) else: an1 = asarray_ndim(an1, 1) check_dim0_aligned(ac1, an1) if an2 is None: an2 = np.sum(ac2, axis=1) else: an2 = asarray_ndim(an2, 1) check_dim0_aligned(ac2, an2) # total number of pairwise comparisons for each variant n_pairs = an1 * an2 # number of pairwise comparisons where there is no difference: # sum of (ac1 * ac2) for each allele (i.e., number of ways to # choose the same allele twice) n_same = np.sum(ac1 * ac2, axis=1) # number of pairwise differences n_diff = n_pairs - n_same # mean number of pairwise differences, accounting for cases where # there are no pairs with ignore_invalid(): mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill) return mpd
python
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None, fill=np.nan): # This function calculates the mean number of pairwise differences # between haplotypes from two different populations, generalising to any # number of alleles. # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # total number of haplotypes sampled from each population if an1 is None: an1 = np.sum(ac1, axis=1) else: an1 = asarray_ndim(an1, 1) check_dim0_aligned(ac1, an1) if an2 is None: an2 = np.sum(ac2, axis=1) else: an2 = asarray_ndim(an2, 1) check_dim0_aligned(ac2, an2) # total number of pairwise comparisons for each variant n_pairs = an1 * an2 # number of pairwise comparisons where there is no difference: # sum of (ac1 * ac2) for each allele (i.e., number of ways to # choose the same allele twice) n_same = np.sum(ac1 * ac2, axis=1) # number of pairwise differences n_diff = n_pairs - n_same # mean number of pairwise differences, accounting for cases where # there are no pairs with ignore_invalid(): mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill) return mpd
[ "def", "mean_pairwise_difference_between", "(", "ac1", ",", "ac2", ",", "an1", "=", "None", ",", "an2", "=", "None", ",", "fill", "=", "np", ".", "nan", ")", ":", "# This function calculates the mean number of pairwise differences", "# between haplotypes from two different populations, generalising to any", "# number of alleles.", "# check inputs", "ac1", "=", "asarray_ndim", "(", "ac1", ",", "2", ")", "ac2", "=", "asarray_ndim", "(", "ac2", ",", "2", ")", "check_dim0_aligned", "(", "ac1", ",", "ac2", ")", "ac1", ",", "ac2", "=", "ensure_dim1_aligned", "(", "ac1", ",", "ac2", ")", "# total number of haplotypes sampled from each population", "if", "an1", "is", "None", ":", "an1", "=", "np", ".", "sum", "(", "ac1", ",", "axis", "=", "1", ")", "else", ":", "an1", "=", "asarray_ndim", "(", "an1", ",", "1", ")", "check_dim0_aligned", "(", "ac1", ",", "an1", ")", "if", "an2", "is", "None", ":", "an2", "=", "np", ".", "sum", "(", "ac2", ",", "axis", "=", "1", ")", "else", ":", "an2", "=", "asarray_ndim", "(", "an2", ",", "1", ")", "check_dim0_aligned", "(", "ac2", ",", "an2", ")", "# total number of pairwise comparisons for each variant", "n_pairs", "=", "an1", "*", "an2", "# number of pairwise comparisons where there is no difference:", "# sum of (ac1 * ac2) for each allele (i.e., number of ways to", "# choose the same allele twice)", "n_same", "=", "np", ".", "sum", "(", "ac1", "*", "ac2", ",", "axis", "=", "1", ")", "# number of pairwise differences", "n_diff", "=", "n_pairs", "-", "n_same", "# mean number of pairwise differences, accounting for cases where", "# there are no pairs", "with", "ignore_invalid", "(", ")", ":", "mpd", "=", "np", ".", "where", "(", "n_pairs", ">", "0", ",", "n_diff", "/", "n_pairs", ",", "fill", ")", "return", "mpd" ]
Calculate for each variant the mean number of pairwise differences
    between chromosomes sampled from two different populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    an1 : array_like, int, shape (n_variants,), optional
        Allele numbers for the first population. If not provided, will be
        calculated from `ac1`.
    an2 : array_like, int, shape (n_variants,), optional
        Allele numbers for the second population. If not provided, will be
        calculated from `ac2`.
    fill : float
        Use this value where there are no pairs to compare (e.g., all allele
        calls are missing).

    Returns
    -------
    mpd : ndarray, float, shape (n_variants,)

    Notes
    -----
    The values returned by this function can be summed over a genome region
    and divided by the number of accessible bases to estimate nucleotide
    divergence between two populations, a.k.a. *Dxy*.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1]])
    >>> ac1 = h.count_alleles(subpop=[0, 1])
    >>> ac2 = h.count_alleles(subpop=[2, 3])
    >>> allel.mean_pairwise_difference_between(ac1, ac2)
    array([0. , 0.5 , 1. , 0.5 , 0. , 1. , 0.75, nan])

    See Also
    --------
    sequence_divergence, windowed_divergence
[ "Calculate", "for", "each", "variant", "the", "mean", "number", "of", "pairwise", "differences", "between", "chromosomes", "sampled", "from", "two", "different", "populations", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L107-L203
956
cggh/scikit-allel
allel/stats/diversity.py
watterson_theta
def watterson_theta(pos, ac, start=None, stop=None, is_accessible=None):
    """Calculate the value of Watterson's estimator over a given region.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    theta_hat_w : float
        Watterson's estimator (theta hat per base).

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
    >>> theta_hat_w
    0.10557184750733138

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # count segregating variants
    S = ac.count_segregating()

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate absolute value
    theta_hat_w_abs = S / a1

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    theta_hat_w = theta_hat_w_abs / n_bases

    return theta_hat_w
python
def watterson_theta(pos, ac, start=None, stop=None, is_accessible=None):

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # count segregating variants
    S = ac.count_segregating()

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate absolute value
    theta_hat_w_abs = S / a1

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    theta_hat_w = theta_hat_w_abs / n_bases

    return theta_hat_w
[ "def", "watterson_theta", "(", "pos", ",", "ac", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "is_accessible", "=", "None", ")", ":", "# check inputs", "if", "not", "isinstance", "(", "pos", ",", "SortedIndex", ")", ":", "pos", "=", "SortedIndex", "(", "pos", ",", "copy", "=", "False", ")", "is_accessible", "=", "asarray_ndim", "(", "is_accessible", ",", "1", ",", "allow_none", "=", "True", ")", "if", "not", "hasattr", "(", "ac", ",", "'count_segregating'", ")", ":", "ac", "=", "AlleleCountsArray", "(", "ac", ",", "copy", "=", "False", ")", "# deal with subregion", "if", "start", "is", "not", "None", "or", "stop", "is", "not", "None", ":", "loc", "=", "pos", ".", "locate_range", "(", "start", ",", "stop", ")", "pos", "=", "pos", "[", "loc", "]", "ac", "=", "ac", "[", "loc", "]", "if", "start", "is", "None", ":", "start", "=", "pos", "[", "0", "]", "if", "stop", "is", "None", ":", "stop", "=", "pos", "[", "-", "1", "]", "# count segregating variants", "S", "=", "ac", ".", "count_segregating", "(", ")", "# assume number of chromosomes sampled is constant for all variants", "n", "=", "ac", ".", "sum", "(", "axis", "=", "1", ")", ".", "max", "(", ")", "# (n-1)th harmonic number", "a1", "=", "np", ".", "sum", "(", "1", "/", "np", ".", "arange", "(", "1", ",", "n", ")", ")", "# calculate absolute value", "theta_hat_w_abs", "=", "S", "/", "a1", "# calculate value per base", "if", "is_accessible", "is", "None", ":", "n_bases", "=", "stop", "-", "start", "+", "1", "else", ":", "n_bases", "=", "np", ".", "count_nonzero", "(", "is_accessible", "[", "start", "-", "1", ":", "stop", "]", ")", "theta_hat_w", "=", "theta_hat_w_abs", "/", "n_bases", "return", "theta_hat_w" ]
Calculate the value of Watterson's estimator over a given region.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    theta_hat_w : float
        Watterson's estimator (theta hat per base).

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
    >>> theta_hat_w
    0.10557184750733138
[ "Calculate", "the", "value", "of", "Watterson", "s", "estimator", "over", "a", "given", "region", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L667-L749
957
cggh/scikit-allel
allel/stats/diversity.py
tajima_d
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
    """Calculate the value of Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366

    """

    # check inputs
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if pos is not None and (start is not None or stop is not None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        loc = pos.locate_range(start, stop)
        ac = ac[loc]

    # count segregating variants
    S = ac.count_segregating()
    if S < min_sites:
        return np.nan

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate Watterson's theta (absolute value)
    theta_hat_w_abs = S / a1

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # calculate theta_hat pi (sum differences over variants)
    theta_hat_pi_abs = np.sum(mpd)

    # N.B., both theta estimates are usually divided by the number of
    # (accessible) bases but here we want the absolute difference
    d = theta_hat_pi_abs - theta_hat_w_abs

    # calculate the denominator (standard deviation)
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # finally calculate Tajima's D
    D = d / d_stdev

    return D
python
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):

    # check inputs
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if pos is not None and (start is not None or stop is not None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        loc = pos.locate_range(start, stop)
        ac = ac[loc]

    # count segregating variants
    S = ac.count_segregating()
    if S < min_sites:
        return np.nan

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate Watterson's theta (absolute value)
    theta_hat_w_abs = S / a1

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # calculate theta_hat pi (sum differences over variants)
    theta_hat_pi_abs = np.sum(mpd)

    # N.B., both theta estimates are usually divided by the number of
    # (accessible) bases but here we want the absolute difference
    d = theta_hat_pi_abs - theta_hat_w_abs

    # calculate the denominator (standard deviation)
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # finally calculate Tajima's D
    D = d / d_stdev

    return D
[ "def", "tajima_d", "(", "ac", ",", "pos", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "min_sites", "=", "3", ")", ":", "# check inputs", "if", "not", "hasattr", "(", "ac", ",", "'count_segregating'", ")", ":", "ac", "=", "AlleleCountsArray", "(", "ac", ",", "copy", "=", "False", ")", "# deal with subregion", "if", "pos", "is", "not", "None", "and", "(", "start", "is", "not", "None", "or", "stop", "is", "not", "None", ")", ":", "if", "not", "isinstance", "(", "pos", ",", "SortedIndex", ")", ":", "pos", "=", "SortedIndex", "(", "pos", ",", "copy", "=", "False", ")", "loc", "=", "pos", ".", "locate_range", "(", "start", ",", "stop", ")", "ac", "=", "ac", "[", "loc", "]", "# count segregating variants", "S", "=", "ac", ".", "count_segregating", "(", ")", "if", "S", "<", "min_sites", ":", "return", "np", ".", "nan", "# assume number of chromosomes sampled is constant for all variants", "n", "=", "ac", ".", "sum", "(", "axis", "=", "1", ")", ".", "max", "(", ")", "# (n-1)th harmonic number", "a1", "=", "np", ".", "sum", "(", "1", "/", "np", ".", "arange", "(", "1", ",", "n", ")", ")", "# calculate Watterson's theta (absolute value)", "theta_hat_w_abs", "=", "S", "/", "a1", "# calculate mean pairwise difference", "mpd", "=", "mean_pairwise_difference", "(", "ac", ",", "fill", "=", "0", ")", "# calculate theta_hat pi (sum differences over variants)", "theta_hat_pi_abs", "=", "np", ".", "sum", "(", "mpd", ")", "# N.B., both theta estimates are usually divided by the number of", "# (accessible) bases but here we want the absolute difference", "d", "=", "theta_hat_pi_abs", "-", "theta_hat_w_abs", "# calculate the denominator (standard deviation)", "a2", "=", "np", ".", "sum", "(", "1", "/", "(", "np", ".", "arange", "(", "1", ",", "n", ")", "**", "2", ")", ")", "b1", "=", "(", "n", "+", "1", ")", "/", "(", "3", "*", "(", "n", "-", "1", ")", ")", "b2", "=", "2", "*", "(", "n", "**", "2", "+", "n", "+", "3", ")", "/", "(", "9", "*", "n", "*", "(", "n", "-", "1", ")", ")", "c1", "=", "b1", "-", "(", "1", "/", "a1", ")", "c2", "=", "b2", "-", "(", "(", "n", "+", "2", ")", "/", "(", "a1", "*", "n", ")", ")", "+", "(", "a2", "/", "(", "a1", "**", "2", ")", ")", "e1", "=", "c1", "/", "a1", "e2", "=", "c2", "/", "(", "a1", "**", "2", "+", "a2", ")", "d_stdev", "=", "np", ".", "sqrt", "(", "(", "e1", "*", "S", ")", "+", "(", "e2", "*", "S", "*", "(", "S", "-", "1", ")", ")", ")", "# finally calculate Tajima's D", "D", "=", "d", "/", "d_stdev", "return", "D" ]
Calculate the value of Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366
[ "Calculate", "the", "value", "of", "Tajima", "s", "D", "over", "a", "given", "region", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L863-L954
958
cggh/scikit-allel
allel/stats/diversity.py
moving_tajima_d
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    """Calculate the value of Tajima's D in moving windows of `size` variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])

    """

    d = moving_statistic(values=ac, statistic=tajima_d, size=size,
                         start=start, stop=stop, step=step,
                         min_sites=min_sites)
    return d
python
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    d = moving_statistic(values=ac, statistic=tajima_d, size=size,
                         start=start, stop=stop, step=step,
                         min_sites=min_sites)
    return d
[ "def", "moving_tajima_d", "(", "ac", ",", "size", ",", "start", "=", "0", ",", "stop", "=", "None", ",", "step", "=", "None", ",", "min_sites", "=", "3", ")", ":", "d", "=", "moving_statistic", "(", "values", "=", "ac", ",", "statistic", "=", "tajima_d", ",", "size", "=", "size", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "step", "=", "step", ",", "min_sites", "=", "min_sites", ")", "return", "d" ]
Calculate the value of Tajima's D in moving windows of `size` variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])
[ "Calculate", "the", "value", "of", "Tajima", "s", "D", "in", "moving", "windows", "of", "size", "variants", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L1067-L1115
959
cggh/scikit-allel
allel/stats/sf.py
sfs
def sfs(dac, n=None):
    """Compute the site frequency spectrum given derived allele counts at
    a set of biallelic variants.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs : ndarray, int, shape (n_chromosomes,)
        Array where the kth element is the number of variant sites with k
        derived alleles.

    """

    # check input
    dac, n = _check_dac_n(dac, n)

    # need platform integer for bincount
    dac = dac.astype(int, copy=False)

    # compute site frequency spectrum
    x = n + 1
    s = np.bincount(dac, minlength=x)

    return s
python
def sfs(dac, n=None):

    # check input
    dac, n = _check_dac_n(dac, n)

    # need platform integer for bincount
    dac = dac.astype(int, copy=False)

    # compute site frequency spectrum
    x = n + 1
    s = np.bincount(dac, minlength=x)

    return s
[ "def", "sfs", "(", "dac", ",", "n", "=", "None", ")", ":", "# check input", "dac", ",", "n", "=", "_check_dac_n", "(", "dac", ",", "n", ")", "# need platform integer for bincount", "dac", "=", "dac", ".", "astype", "(", "int", ",", "copy", "=", "False", ")", "# compute site frequency spectrum", "x", "=", "n", "+", "1", "s", "=", "np", ".", "bincount", "(", "dac", ",", "minlength", "=", "x", ")", "return", "s" ]
Compute the site frequency spectrum given derived allele counts at
    a set of biallelic variants.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs : ndarray, int, shape (n_chromosomes,)
        Array where the kth element is the number of variant sites with k
        derived alleles.
[ "Compute", "the", "site", "frequency", "spectrum", "given", "derived", "allele", "counts", "at", "a", "set", "of", "biallelic", "variants", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L37-L66
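A quick doctest-style sketch of the bincount step in the sfs record above, using plain numpy; the dac values and n = 4 are invented for illustration:

>>> import numpy as np
>>> dac = np.array([1, 2, 1, 0, 3, 1])   # made-up derived allele counts per variant
>>> np.bincount(dac.astype(int), minlength=4 + 1)   # same computation as sfs(dac, n=4)
array([1, 3, 1, 1, 0])

The kth entry counts the variants carrying k derived alleles, so entry 1 is 3 because three of the six variants are singletons.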
960
cggh/scikit-allel
allel/stats/sf.py
sfs_folded
def sfs_folded(ac, n=None):
    """Compute the folded site frequency spectrum given reference and
    alternate allele counts at a set of biallelic variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int, shape (n_chromosomes//2,)
        Array where the kth element is the number of variant sites with a
        minor allele count of k.

    """

    # check input
    ac, n = _check_ac_n(ac, n)

    # compute minor allele counts
    mac = np.amin(ac, axis=1)

    # need platform integer for bincount
    mac = mac.astype(int, copy=False)

    # compute folded site frequency spectrum
    x = n//2 + 1
    s = np.bincount(mac, minlength=x)

    return s
python
def sfs_folded(ac, n=None):

    # check input
    ac, n = _check_ac_n(ac, n)

    # compute minor allele counts
    mac = np.amin(ac, axis=1)

    # need platform integer for bincount
    mac = mac.astype(int, copy=False)

    # compute folded site frequency spectrum
    x = n//2 + 1
    s = np.bincount(mac, minlength=x)

    return s
[ "def", "sfs_folded", "(", "ac", ",", "n", "=", "None", ")", ":", "# check input", "ac", ",", "n", "=", "_check_ac_n", "(", "ac", ",", "n", ")", "# compute minor allele counts", "mac", "=", "np", ".", "amin", "(", "ac", ",", "axis", "=", "1", ")", "# need platform integer for bincount", "mac", "=", "mac", ".", "astype", "(", "int", ",", "copy", "=", "False", ")", "# compute folded site frequency spectrum", "x", "=", "n", "//", "2", "+", "1", "s", "=", "np", ".", "bincount", "(", "mac", ",", "minlength", "=", "x", ")", "return", "s" ]
Compute the folded site frequency spectrum given reference and
    alternate allele counts at a set of biallelic variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int, shape (n_chromosomes//2,)
        Array where the kth element is the number of variant sites with a
        minor allele count of k.
[ "Compute", "the", "folded", "site", "frequency", "spectrum", "given", "reference", "and", "alternate", "allele", "counts", "at", "a", "set", "of", "biallelic", "variants", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L69-L101
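The folded spectrum in the record above bins minor allele counts instead of derived allele counts. A minimal numpy sketch with made-up ref/alt counts and n = 4 chromosomes:

>>> import numpy as np
>>> ac = np.array([[3, 1], [2, 2], [4, 0]])   # invented ref/alt counts per variant
>>> mac = np.amin(ac, axis=1)   # minor allele counts
>>> mac
array([1, 2, 0])
>>> np.bincount(mac, minlength=4 // 2 + 1)   # same computation as sfs_folded(ac, n=4)
array([1, 1, 1])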
961
cggh/scikit-allel
allel/stats/sf.py
sfs_scaled
def sfs_scaled(dac, n=None):
    """Compute the site frequency spectrum scaled such that a constant value
    is expected across the spectrum for neutral variation and constant
    population size.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        An array where the value of the kth element is the number of variants
        with k derived alleles, multiplied by k.

    """

    # compute site frequency spectrum
    s = sfs(dac, n=n)

    # apply scaling
    s = scale_sfs(s)

    return s
python
def sfs_scaled(dac, n=None):

    # compute site frequency spectrum
    s = sfs(dac, n=n)

    # apply scaling
    s = scale_sfs(s)

    return s
[ "def", "sfs_scaled", "(", "dac", ",", "n", "=", "None", ")", ":", "# compute site frequency spectrum", "s", "=", "sfs", "(", "dac", ",", "n", "=", "n", ")", "# apply scaling", "s", "=", "scale_sfs", "(", "s", ")", "return", "s" ]
Compute the site frequency spectrum scaled such that a constant value
    is expected across the spectrum for neutral variation and constant
    population size.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        An array where the value of the kth element is the number of variants
        with k derived alleles, multiplied by k.
[ "Compute", "the", "site", "frequency", "spectrum", "scaled", "such", "that", "a", "constant", "value", "is", "expected", "across", "the", "spectrum", "for", "neutral", "variation", "and", "constant", "population", "size", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L104-L130
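sfs_scaled simply composes the two neighbouring records: compute the spectrum, then multiply the kth entry by k. Continuing the made-up numbers from the sfs sketch above:

>>> import numpy as np
>>> s = np.array([1, 3, 1, 1, 0])   # unfolded spectrum for n = 4, from the sketch above
>>> s * np.arange(s.size)   # same computation as scale_sfs(s)
array([0, 3, 2, 3, 0])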
962
cggh/scikit-allel
allel/stats/sf.py
scale_sfs
def scale_sfs(s):
    """Scale a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        Scaled site frequency spectrum.

    """
    k = np.arange(s.size)
    out = s * k
    return out
python
def scale_sfs(s):
    k = np.arange(s.size)
    out = s * k
    return out
[ "def", "scale_sfs", "(", "s", ")", ":", "k", "=", "np", ".", "arange", "(", "s", ".", "size", ")", "out", "=", "s", "*", "k", "return", "out" ]
Scale a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        Scaled site frequency spectrum.
[ "Scale", "a", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L133-L149
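Why multiplying by k is a useful scaling (a standard coalescent result, stated here for context rather than taken from this file): under neutrality and constant population size the expected unfolded spectrum is inversely proportional to the allele count,

E[\xi_k] = \frac{\theta}{k}, \qquad k = 1, \ldots, n - 1,

so E[k \cdot \xi_k] = \theta is constant across the spectrum, and departures from a flat line are a visual flag for selection or demographic change.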
963
cggh/scikit-allel
allel/stats/sf.py
sfs_folded_scaled
def sfs_folded_scaled(ac, n=None):
    """Compute the folded site frequency spectrum scaled such that a constant
    value is expected across the spectrum for neutral variation and constant
    population size.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        An array where the value of the kth element is the number of variants
        with minor allele count k, multiplied by the scaling factor
        (k * (n - k) / n).

    """

    # check input
    ac, n = _check_ac_n(ac, n)

    # compute the site frequency spectrum
    s = sfs_folded(ac, n=n)

    # apply scaling
    s = scale_sfs_folded(s, n)

    return s
python
def sfs_folded_scaled(ac, n=None):

    # check input
    ac, n = _check_ac_n(ac, n)

    # compute the site frequency spectrum
    s = sfs_folded(ac, n=n)

    # apply scaling
    s = scale_sfs_folded(s, n)

    return s
[ "def", "sfs_folded_scaled", "(", "ac", ",", "n", "=", "None", ")", ":", "# check input", "ac", ",", "n", "=", "_check_ac_n", "(", "ac", ",", "n", ")", "# compute the site frequency spectrum", "s", "=", "sfs_folded", "(", "ac", ",", "n", "=", "n", ")", "# apply scaling", "s", "=", "scale_sfs_folded", "(", "s", ",", "n", ")", "return", "s" ]
Compute the folded site frequency spectrum scaled such that a constant
    value is expected across the spectrum for neutral variation and constant
    population size.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        An array where the value of the kth element is the number of variants
        with minor allele count k, multiplied by the scaling factor
        (k * (n - k) / n).
[ "Compute", "the", "folded", "site", "frequency", "spectrum", "scaled", "such", "that", "a", "constant", "value", "is", "expected", "across", "the", "spectrum", "for", "neutral", "variation", "and", "constant", "population", "size", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L152-L182
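For the folded case the scaling factor is k * (n - k) / n rather than k. A numpy sketch continuing the made-up folded spectrum from the sfs_folded sketch above (n = 4):

>>> import numpy as np
>>> s = np.array([1, 1, 1])   # folded spectrum from the sketch above
>>> k = np.arange(s.shape[0])
>>> s * k * (4 - k) / 4   # same computation as scale_sfs_folded(s, n=4)
array([0.  , 0.75, 1.  ])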
964
cggh/scikit-allel
allel/stats/sf.py
scale_sfs_folded
def scale_sfs_folded(s, n):
    """Scale a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes//2,)
        Folded site frequency spectrum.
    n : int
        Number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        Scaled folded site frequency spectrum.

    """
    k = np.arange(s.shape[0])
    out = s * k * (n - k) / n
    return out
python
def scale_sfs_folded(s, n):
    k = np.arange(s.shape[0])
    out = s * k * (n - k) / n
    return out
[ "def", "scale_sfs_folded", "(", "s", ",", "n", ")", ":", "k", "=", "np", ".", "arange", "(", "s", ".", "shape", "[", "0", "]", ")", "out", "=", "s", "*", "k", "*", "(", "n", "-", "k", ")", "/", "n", "return", "out" ]
Scale a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes//2,)
        Folded site frequency spectrum.
    n : int
        Number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        Scaled folded site frequency spectrum.
[ "Scale", "a", "folded", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L185-L203
965
cggh/scikit-allel
allel/stats/sf.py
joint_sfs
def joint_sfs(dac1, dac2, n1=None, n2=None):
    """Compute the joint site frequency spectrum between two populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
        Array where the (i, j)th element is the number of variant sites with
        i derived alleles in the first population and j derived alleles in
        the second population.

    """

    # check inputs
    dac1, n1 = _check_dac_n(dac1, n1)
    dac2, n2 = _check_dac_n(dac2, n2)

    # compute site frequency spectrum
    x = n1 + 1
    y = n2 + 1

    # need platform integer for bincount
    tmp = (dac1 * y + dac2).astype(int, copy=False)

    s = np.bincount(tmp)
    s.resize(x, y)
    return s
python
def joint_sfs(dac1, dac2, n1=None, n2=None):

    # check inputs
    dac1, n1 = _check_dac_n(dac1, n1)
    dac2, n2 = _check_dac_n(dac2, n2)

    # compute site frequency spectrum
    x = n1 + 1
    y = n2 + 1

    # need platform integer for bincount
    tmp = (dac1 * y + dac2).astype(int, copy=False)

    s = np.bincount(tmp)
    s.resize(x, y)
    return s
[ "def", "joint_sfs", "(", "dac1", ",", "dac2", ",", "n1", "=", "None", ",", "n2", "=", "None", ")", ":", "# check inputs", "dac1", ",", "n1", "=", "_check_dac_n", "(", "dac1", ",", "n1", ")", "dac2", ",", "n2", "=", "_check_dac_n", "(", "dac2", ",", "n2", ")", "# compute site frequency spectrum", "x", "=", "n1", "+", "1", "y", "=", "n2", "+", "1", "# need platform integer for bincount", "tmp", "=", "(", "dac1", "*", "y", "+", "dac2", ")", ".", "astype", "(", "int", ",", "copy", "=", "False", ")", "s", "=", "np", ".", "bincount", "(", "tmp", ")", "s", ".", "resize", "(", "x", ",", "y", ")", "return", "s" ]
Compute the joint site frequency spectrum between two populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
        Array where the (i, j)th element is the number of variant sites with
        i derived alleles in the first population and j derived alleles in
        the second population.
[ "Compute", "the", "joint", "site", "frequency", "spectrum", "between", "two", "populations", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L206-L238
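The joint spectrum in the record above is a 2-D histogram computed with a single flattened bincount. This sketch mirrors it on made-up counts for two populations of n1 = n2 = 2 chromosomes, using an explicit minlength plus reshape rather than the in-place resize from the record; both give the same zero-padded result, but this variant avoids mutating the array:

>>> import numpy as np
>>> dac1 = np.array([0, 1, 2])   # invented derived allele counts, population 1
>>> dac2 = np.array([1, 1, 0])   # invented derived allele counts, population 2
>>> x, y = 2 + 1, 2 + 1
>>> np.bincount(dac1 * y + dac2, minlength=x * y).reshape(x, y)
array([[0, 1, 0],
       [0, 1, 0],
       [1, 0, 0]])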
966
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_folded
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
    """Compute the joint folded site frequency spectrum between two
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the number of variant sites with
        a minor allele count of i in the first population and j in the
        second population.

    """

    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute minor allele counts
    mac1 = np.amin(ac1, axis=1)
    mac2 = np.amin(ac2, axis=1)

    # compute site frequency spectrum
    x = n1//2 + 1
    y = n2//2 + 1
    tmp = (mac1 * y + mac2).astype(int, copy=False)
    s = np.bincount(tmp)
    s.resize(x, y)

    return s
python
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):

    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute minor allele counts
    mac1 = np.amin(ac1, axis=1)
    mac2 = np.amin(ac2, axis=1)

    # compute site frequency spectrum
    x = n1//2 + 1
    y = n2//2 + 1
    tmp = (mac1 * y + mac2).astype(int, copy=False)
    s = np.bincount(tmp)
    s.resize(x, y)

    return s
[ "def", "joint_sfs_folded", "(", "ac1", ",", "ac2", ",", "n1", "=", "None", ",", "n2", "=", "None", ")", ":", "# check inputs", "ac1", ",", "n1", "=", "_check_ac_n", "(", "ac1", ",", "n1", ")", "ac2", ",", "n2", "=", "_check_ac_n", "(", "ac2", ",", "n2", ")", "# compute minor allele counts", "mac1", "=", "np", ".", "amin", "(", "ac1", ",", "axis", "=", "1", ")", "mac2", "=", "np", ".", "amin", "(", "ac2", ",", "axis", "=", "1", ")", "# compute site frequency spectrum", "x", "=", "n1", "//", "2", "+", "1", "y", "=", "n2", "//", "2", "+", "1", "tmp", "=", "(", "mac1", "*", "y", "+", "mac2", ")", ".", "astype", "(", "int", ",", "copy", "=", "False", ")", "s", "=", "np", ".", "bincount", "(", "tmp", ")", "s", ".", "resize", "(", "x", ",", "y", ")", "return", "s" ]
Compute the joint folded site frequency spectrum between two
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the number of variant sites with
        a minor allele count of i in the first population and j in the
        second population.
[ "Compute", "the", "joint", "folded", "site", "frequency", "spectrum", "between", "two", "populations", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L241-L277
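Same flattened-bincount trick as joint_sfs, but over minor allele counts. Made-up data for two populations with n1 = n2 = 2, so each folded axis has length 2 // 2 + 1 = 2:

>>> import numpy as np
>>> ac1 = np.array([[2, 0], [1, 1], [0, 2]])   # invented allele counts, population 1
>>> ac2 = np.array([[1, 1], [2, 0], [2, 0]])   # invented allele counts, population 2
>>> mac1, mac2 = np.amin(ac1, axis=1), np.amin(ac2, axis=1)
>>> y = 2 // 2 + 1
>>> np.bincount(mac1 * y + mac2, minlength=2 * 2).reshape(2, 2)
array([[1, 1],
       [1, 0]])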
967
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_scaled
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None):
    """Compute the joint site frequency spectrum between two populations,
    scaled such that a constant value is expected across the spectrum for
    neutral variation, constant population size and unrelated populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with i derived alleles in the first population and j derived
        alleles in the second population.

    """

    # compute site frequency spectrum
    s = joint_sfs(dac1, dac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs(s)

    return s
python
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None):

    # compute site frequency spectrum
    s = joint_sfs(dac1, dac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs(s)

    return s
[ "def", "joint_sfs_scaled", "(", "dac1", ",", "dac2", ",", "n1", "=", "None", ",", "n2", "=", "None", ")", ":", "# compute site frequency spectrum", "s", "=", "joint_sfs", "(", "dac1", ",", "dac2", ",", "n1", "=", "n1", ",", "n2", "=", "n2", ")", "# apply scaling", "s", "=", "scale_joint_sfs", "(", "s", ")", "return", "s" ]
Compute the joint site frequency spectrum between two populations,
    scaled such that a constant value is expected across the spectrum for
    neutral variation, constant population size and unrelated populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with i derived alleles in the first population and j derived
        alleles in the second population.
[ "Compute", "the", "joint", "site", "frequency", "spectrum", "between", "two", "populations", "scaled", "such", "that", "a", "constant", "value", "is", "expected", "across", "the", "spectrum", "for", "neutral", "variation", "constant", "population", "size", "and", "unrelated", "populations", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L280-L309
968
cggh/scikit-allel
allel/stats/sf.py
scale_joint_sfs
def scale_joint_sfs(s):
    """Scale a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n1, n2)
        Joint site frequency spectrum.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1, n2)
        Scaled joint site frequency spectrum.

    """
    i = np.arange(s.shape[0])[:, None]
    j = np.arange(s.shape[1])[None, :]
    out = (s * i) * j
    return out
python
def scale_joint_sfs(s):
    i = np.arange(s.shape[0])[:, None]
    j = np.arange(s.shape[1])[None, :]
    out = (s * i) * j
    return out
[ "def", "scale_joint_sfs", "(", "s", ")", ":", "i", "=", "np", ".", "arange", "(", "s", ".", "shape", "[", "0", "]", ")", "[", ":", ",", "None", "]", "j", "=", "np", ".", "arange", "(", "s", ".", "shape", "[", "1", "]", ")", "[", "None", ",", ":", "]", "out", "=", "(", "s", "*", "i", ")", "*", "j", "return", "out" ]
Scale a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n1, n2)
        Joint site frequency spectrum.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1, n2)
        Scaled joint site frequency spectrum.
[ "Scale", "a", "joint", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L312-L330
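Broadcasting note on the record above: i is a column vector and j a row vector, so (s * i) * j scales entry (i, j) by i * j in one vectorised expression. Applied to the made-up joint spectrum from the joint_sfs sketch:

>>> import numpy as np
>>> s = np.array([[0, 1, 0],
...               [0, 1, 0],
...               [1, 0, 0]])
>>> i = np.arange(s.shape[0])[:, None]
>>> j = np.arange(s.shape[1])[None, :]
>>> (s * i) * j
array([[0, 0, 0],
       [0, 1, 0],
       [0, 0, 0]])

Everything in row 0 or column 0 is zeroed out, so classes fixed ancestral in either population drop out of the scaled spectrum.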
969
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_folded_scaled
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
    """Compute the joint folded site frequency spectrum between two
    populations, scaled such that a constant value is expected across the
    spectrum for neutral variation, constant population size and unrelated
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with a minor allele count of i in the first population and j
        in the second population.

    """  # noqa

    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute site frequency spectrum
    s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs_folded(s, n1, n2)

    return s
python
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
    # noqa

    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute site frequency spectrum
    s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs_folded(s, n1, n2)

    return s
[ "def", "joint_sfs_folded_scaled", "(", "ac1", ",", "ac2", ",", "n1", "=", "None", ",", "n2", "=", "None", ")", ":", "# noqa", "# check inputs", "ac1", ",", "n1", "=", "_check_ac_n", "(", "ac1", ",", "n1", ")", "ac2", ",", "n2", "=", "_check_ac_n", "(", "ac2", ",", "n2", ")", "# compute site frequency spectrum", "s", "=", "joint_sfs_folded", "(", "ac1", ",", "ac2", ",", "n1", "=", "n1", ",", "n2", "=", "n2", ")", "# apply scaling", "s", "=", "scale_joint_sfs_folded", "(", "s", ",", "n1", ",", "n2", ")", "return", "s" ]
Compute the joint folded site frequency spectrum between two
    populations, scaled such that a constant value is expected across the
    spectrum for neutral variation, constant population size and unrelated
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with a minor allele count of i in the first population and j
        in the second population.
[ "Compute", "the", "joint", "folded", "site", "frequency", "spectrum", "between", "two", "populations", "scaled", "such", "that", "a", "constant", "value", "is", "expected", "across", "the", "spectrum", "for", "neutral", "variation", "constant", "population", "size", "and", "unrelated", "populations", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L333-L367
970
cggh/scikit-allel
allel/stats/sf.py
scale_joint_sfs_folded
def scale_joint_sfs_folded(s, n1, n2):
    """Scale a folded joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
        Folded joint site frequency spectrum.
    n1, n2 : int
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
        Scaled folded joint site frequency spectrum.

    """  # noqa
    out = np.empty_like(s)
    for i in range(s.shape[0]):
        for j in range(s.shape[1]):
            out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
    return out
python
def scale_joint_sfs_folded(s, n1, n2):
    # noqa
    out = np.empty_like(s)
    for i in range(s.shape[0]):
        for j in range(s.shape[1]):
            out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
    return out
[ "def", "scale_joint_sfs_folded", "(", "s", ",", "n1", ",", "n2", ")", ":", "# noqa", "out", "=", "np", ".", "empty_like", "(", "s", ")", "for", "i", "in", "range", "(", "s", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "s", ".", "shape", "[", "1", "]", ")", ":", "out", "[", "i", ",", "j", "]", "=", "s", "[", "i", ",", "j", "]", "*", "i", "*", "j", "*", "(", "n1", "-", "i", ")", "*", "(", "n2", "-", "j", ")", "return", "out" ]
Scale a folded joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
        Folded joint site frequency spectrum.
    n1, n2 : int
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
        Scaled folded joint site frequency spectrum.
[ "Scale", "a", "folded", "joint", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L370-L390
971
cggh/scikit-allel
allel/stats/sf.py
fold_sfs
def fold_sfs(s, n):
    """Fold a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum
    n : int
        Total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int
        Folded site frequency spectrum

    """

    # check inputs
    s = asarray_ndim(s, 1)
    assert s.shape[0] <= n + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to n
    if s.shape[0] < n + 1:
        sn = np.zeros(n + 1, dtype=s.dtype)
        sn[:s.shape[0]] = s
        s = sn

    # fold
    nf = (n + 1) // 2
    n = nf * 2
    o = s[:nf] + s[nf:n][::-1]

    return o
python
def fold_sfs(s, n):

    # check inputs
    s = asarray_ndim(s, 1)
    assert s.shape[0] <= n + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to n
    if s.shape[0] < n + 1:
        sn = np.zeros(n + 1, dtype=s.dtype)
        sn[:s.shape[0]] = s
        s = sn

    # fold
    nf = (n + 1) // 2
    n = nf * 2
    o = s[:nf] + s[nf:n][::-1]

    return o
[ "def", "fold_sfs", "(", "s", ",", "n", ")", ":", "# check inputs", "s", "=", "asarray_ndim", "(", "s", ",", "1", ")", "assert", "s", ".", "shape", "[", "0", "]", "<=", "n", "+", "1", ",", "'invalid number of chromosomes'", "# need to check s has all entries up to n", "if", "s", ".", "shape", "[", "0", "]", "<", "n", "+", "1", ":", "sn", "=", "np", ".", "zeros", "(", "n", "+", "1", ",", "dtype", "=", "s", ".", "dtype", ")", "sn", "[", ":", "s", ".", "shape", "[", "0", "]", "]", "=", "s", "s", "=", "sn", "# fold", "nf", "=", "(", "n", "+", "1", ")", "//", "2", "n", "=", "nf", "*", "2", "o", "=", "s", "[", ":", "nf", "]", "+", "s", "[", "nf", ":", "n", "]", "[", ":", ":", "-", "1", "]", "return", "o" ]
Fold a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum
    n : int
        Total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int
        Folded site frequency spectrum
[ "Fold", "a", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L393-L425
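The fold step pairs derived allele count k with its mirror class after zero-padding the input to length n + 1. A numpy sketch with an invented unfolded spectrum and n = 5 chromosomes:

>>> import numpy as np
>>> s = np.array([0, 3, 2, 1, 1, 0])   # made-up unfolded spectrum, length n + 1
>>> nf = (5 + 1) // 2
>>> s[:nf] + s[nf:nf * 2][::-1]   # same fold as in the record above
array([0, 4, 3])

Entry 1 is 4 because the singleton class (k = 1) and its mirror (k = 4) are merged.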
972
cggh/scikit-allel
allel/stats/sf.py
fold_joint_sfs
def fold_joint_sfs(s, n1, n2):
    """Fold a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes, n_chromosomes)
        Joint site frequency spectrum.
    n1, n2 : int
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int
        Folded joint site frequency spectrum.

    """

    # check inputs
    s = asarray_ndim(s, 2)
    assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
    assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to m
    if s.shape[0] < n1 + 1:
        sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
        sm[:s.shape[0]] = s
        s = sm

    # need to check s has all entries up to n
    if s.shape[1] < n2 + 1:
        sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
        sn[:, :s.shape[1]] = s
        s = sn

    # fold
    mf = (n1 + 1) // 2
    nf = (n2 + 1) // 2
    n1 = mf * 2
    n2 = nf * 2
    o = (s[:mf, :nf] +  # top left
         s[mf:n1, :nf][::-1] +  # top right
         s[:mf, nf:n2][:, ::-1] +  # bottom left
         s[mf:n1, nf:n2][::-1, ::-1])  # bottom right

    return o
python
def fold_joint_sfs(s, n1, n2):

    # check inputs
    s = asarray_ndim(s, 2)
    assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
    assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to m
    if s.shape[0] < n1 + 1:
        sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
        sm[:s.shape[0]] = s
        s = sm

    # need to check s has all entries up to n
    if s.shape[1] < n2 + 1:
        sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
        sn[:, :s.shape[1]] = s
        s = sn

    # fold
    mf = (n1 + 1) // 2
    nf = (n2 + 1) // 2
    n1 = mf * 2
    n2 = nf * 2
    o = (s[:mf, :nf] +  # top left
         s[mf:n1, :nf][::-1] +  # top right
         s[:mf, nf:n2][:, ::-1] +  # bottom left
         s[mf:n1, nf:n2][::-1, ::-1])  # bottom right

    return o
[ "def", "fold_joint_sfs", "(", "s", ",", "n1", ",", "n2", ")", ":", "# check inputs", "s", "=", "asarray_ndim", "(", "s", ",", "2", ")", "assert", "s", ".", "shape", "[", "0", "]", "<=", "n1", "+", "1", ",", "'invalid number of chromosomes'", "assert", "s", ".", "shape", "[", "1", "]", "<=", "n2", "+", "1", ",", "'invalid number of chromosomes'", "# need to check s has all entries up to m", "if", "s", ".", "shape", "[", "0", "]", "<", "n1", "+", "1", ":", "sm", "=", "np", ".", "zeros", "(", "(", "n1", "+", "1", ",", "s", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "s", ".", "dtype", ")", "sm", "[", ":", "s", ".", "shape", "[", "0", "]", "]", "=", "s", "s", "=", "sm", "# need to check s has all entries up to n", "if", "s", ".", "shape", "[", "1", "]", "<", "n2", "+", "1", ":", "sn", "=", "np", ".", "zeros", "(", "(", "s", ".", "shape", "[", "0", "]", ",", "n2", "+", "1", ")", ",", "dtype", "=", "s", ".", "dtype", ")", "sn", "[", ":", ",", ":", "s", ".", "shape", "[", "1", "]", "]", "=", "s", "s", "=", "sn", "# fold", "mf", "=", "(", "n1", "+", "1", ")", "//", "2", "nf", "=", "(", "n2", "+", "1", ")", "//", "2", "n1", "=", "mf", "*", "2", "n2", "=", "nf", "*", "2", "o", "=", "(", "s", "[", ":", "mf", ",", ":", "nf", "]", "+", "# top left", "s", "[", "mf", ":", "n1", ",", ":", "nf", "]", "[", ":", ":", "-", "1", "]", "+", "# top right", "s", "[", ":", "mf", ",", "nf", ":", "n2", "]", "[", ":", ",", ":", ":", "-", "1", "]", "+", "# bottom left", "s", "[", "mf", ":", "n1", ",", "nf", ":", "n2", "]", "[", ":", ":", "-", "1", ",", ":", ":", "-", "1", "]", ")", "# bottom right", "return", "o" ]
Fold a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes, n_chromosomes)
        Joint site frequency spectrum.
    n1, n2 : int
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int
        Folded joint site frequency spectrum.
[ "Fold", "a", "joint", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L428-L472
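The joint fold adds four mirrored quadrants. Smallest possible sketch, with an invented 2 x 2 joint spectrum for n1 = n2 = 1 so every cell folds into a single class:

>>> import numpy as np
>>> s = np.array([[1, 2],
...               [3, 4]])
>>> mf = nf = 1
>>> (s[:mf, :nf] + s[mf:, :nf][::-1]
...  + s[:mf, nf:][:, ::-1] + s[mf:, nf:][::-1, ::-1])
array([[10]])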
973
cggh/scikit-allel
allel/stats/sf.py
plot_sfs
def plot_sfs(s, yscale='log', bins=None, n=None,
             clip_endpoints=True, label=None, plot_kwargs=None,
             ax=None):
    """Plot a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    import matplotlib.pyplot as plt
    import scipy

    # check inputs
    s = asarray_ndim(s, 1)

    # setup axes
    if ax is None:
        fig, ax = plt.subplots()

    # setup data
    if bins is None:
        if clip_endpoints:
            x = np.arange(1, s.shape[0]-1)
            y = s[1:-1]
        else:
            x = np.arange(s.shape[0])
            y = s
    else:
        if clip_endpoints:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(1, s.shape[0]-1),
                values=s[1:-1],
                bins=bins,
                statistic='sum')
        else:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(s.shape[0]),
                values=s,
                bins=bins,
                statistic='sum')
        # use bin midpoints for plotting
        x = (b[:-1] + b[1:]) / 2

    if n:
        # convert allele counts to allele frequencies
        x = x / n
        ax.set_xlabel('derived allele frequency')
    else:
        ax.set_xlabel('derived allele count')

    # do plotting
    if plot_kwargs is None:
        plot_kwargs = dict()
    ax.plot(x, y, label=label, **plot_kwargs)

    # tidy
    ax.set_yscale(yscale)
    ax.set_ylabel('site frequency')
    ax.autoscale(axis='x', tight=True)

    return ax
python
def plot_sfs(s, yscale='log', bins=None, n=None,
             clip_endpoints=True, label=None, plot_kwargs=None,
             ax=None):
    import matplotlib.pyplot as plt
    import scipy

    # check inputs
    s = asarray_ndim(s, 1)

    # setup axes
    if ax is None:
        fig, ax = plt.subplots()

    # setup data
    if bins is None:
        if clip_endpoints:
            x = np.arange(1, s.shape[0]-1)
            y = s[1:-1]
        else:
            x = np.arange(s.shape[0])
            y = s
    else:
        if clip_endpoints:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(1, s.shape[0]-1),
                values=s[1:-1],
                bins=bins,
                statistic='sum')
        else:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(s.shape[0]),
                values=s,
                bins=bins,
                statistic='sum')
        # use bin midpoints for plotting
        x = (b[:-1] + b[1:]) / 2

    if n:
        # convert allele counts to allele frequencies
        x = x / n
        ax.set_xlabel('derived allele frequency')
    else:
        ax.set_xlabel('derived allele count')

    # do plotting
    if plot_kwargs is None:
        plot_kwargs = dict()
    ax.plot(x, y, label=label, **plot_kwargs)

    # tidy
    ax.set_yscale(yscale)
    ax.set_ylabel('site frequency')
    ax.autoscale(axis='x', tight=True)

    return ax
[ "def", "plot_sfs", "(", "s", ",", "yscale", "=", "'log'", ",", "bins", "=", "None", ",", "n", "=", "None", ",", "clip_endpoints", "=", "True", ",", "label", "=", "None", ",", "plot_kwargs", "=", "None", ",", "ax", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "scipy", "# check inputs", "s", "=", "asarray_ndim", "(", "s", ",", "1", ")", "# setup axes", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "# setup data", "if", "bins", "is", "None", ":", "if", "clip_endpoints", ":", "x", "=", "np", ".", "arange", "(", "1", ",", "s", ".", "shape", "[", "0", "]", "-", "1", ")", "y", "=", "s", "[", "1", ":", "-", "1", "]", "else", ":", "x", "=", "np", ".", "arange", "(", "s", ".", "shape", "[", "0", "]", ")", "y", "=", "s", "else", ":", "if", "clip_endpoints", ":", "y", ",", "b", ",", "_", "=", "scipy", ".", "stats", ".", "binned_statistic", "(", "np", ".", "arange", "(", "1", ",", "s", ".", "shape", "[", "0", "]", "-", "1", ")", ",", "values", "=", "s", "[", "1", ":", "-", "1", "]", ",", "bins", "=", "bins", ",", "statistic", "=", "'sum'", ")", "else", ":", "y", ",", "b", ",", "_", "=", "scipy", ".", "stats", ".", "binned_statistic", "(", "np", ".", "arange", "(", "s", ".", "shape", "[", "0", "]", ")", ",", "values", "=", "s", ",", "bins", "=", "bins", ",", "statistic", "=", "'sum'", ")", "# use bin midpoints for plotting", "x", "=", "(", "b", "[", ":", "-", "1", "]", "+", "b", "[", "1", ":", "]", ")", "/", "2", "if", "n", ":", "# convert allele counts to allele frequencies", "x", "=", "x", "/", "n", "ax", ".", "set_xlabel", "(", "'derived allele frequency'", ")", "else", ":", "ax", ".", "set_xlabel", "(", "'derived allele count'", ")", "# do plotting", "if", "plot_kwargs", "is", "None", ":", "plot_kwargs", "=", "dict", "(", ")", "ax", ".", "plot", "(", "x", ",", "y", ",", "label", "=", "label", ",", "*", "*", "plot_kwargs", ")", "# tidy", "ax", ".", "set_yscale", "(", "yscale", ")", "ax", ".", "set_ylabel", "(", "'site frequency'", ")", "ax", ".", "autoscale", "(", "axis", "=", "'x'", ",", "tight", "=", "True", ")", "return", "ax" ]
Plot a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
[ "Plot", "a", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L475-L558
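A short usage sketch for the plotting record above; the random counts are invented, and the imports follow the module path given in this record (allel/stats/sf.py):

import numpy as np
import matplotlib.pyplot as plt
from allel.stats.sf import sfs, plot_sfs

dac = np.random.randint(0, 11, size=1000)   # fake derived allele counts, n = 10
s = sfs(dac, n=10)
ax = plot_sfs(s, n=10, label='simulated')   # passing n makes the x axis a frequency
ax.legend()
plt.show()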
974
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_folded
def plot_sfs_folded(*args, **kwargs):
    """Plot a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes/2,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    ax = plot_sfs(*args, **kwargs)
    n = kwargs.get('n', None)
    if n:
        ax.set_xlabel('minor allele frequency')
    else:
        ax.set_xlabel('minor allele count')
    return ax
python
def plot_sfs_folded(*args, **kwargs): ax = plot_sfs(*args, **kwargs) n = kwargs.get('n', None) if n: ax.set_xlabel('minor allele frequency') else: ax.set_xlabel('minor allele count') return ax
[ "def", "plot_sfs_folded", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ax", "=", "plot_sfs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "n", "=", "kwargs", ".", "get", "(", "'n'", ",", "None", ")", "if", "n", ":", "ax", ".", "set_xlabel", "(", "'minor allele frequency'", ")", "else", ":", "ax", ".", "set_xlabel", "(", "'minor allele count'", ")", "return", "ax" ]
Plot a folded site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "folded", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L562-L598
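The folded wrapper is normally fed a spectrum of minor allele counts, e.g. from allel.sfs_folded; a sketch under the same assumptions as above:

    import numpy as np
    import allel

    n = 20  # chromosomes sampled (assumed)
    a = np.random.randint(0, n + 1, size=1000)  # hypothetical alternate allele counts
    ac = np.column_stack([n - a, a])  # (n_variants, 2) allele counts array
    s = allel.sfs_folded(ac)  # folded spectrum of minor allele counts
    ax = allel.plot_sfs_folded(s, n=n)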
975
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_scaled
def plot_sfs_scaled(*args, **kwargs): """Plot a scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ kwargs.setdefault('yscale', 'linear') ax = plot_sfs(*args, **kwargs) ax.set_ylabel('scaled site frequency') return ax
python
def plot_sfs_scaled(*args, **kwargs): kwargs.setdefault('yscale', 'linear') ax = plot_sfs(*args, **kwargs) ax.set_ylabel('scaled site frequency') return ax
[ "def", "plot_sfs_scaled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'yscale'", ",", "'linear'", ")", "ax", "=", "plot_sfs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ax", ".", "set_ylabel", "(", "'scaled site frequency'", ")", "return", "ax" ]
Plot a scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "scaled", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L602-L634
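For the scaled wrapper the input spectrum is usually pre-scaled, e.g. with allel.sfs_scaled (sketch, same assumptions):

    import numpy as np
    import allel

    dac = np.random.randint(0, 21, size=1000)  # hypothetical derived allele counts
    s = allel.sfs_scaled(dac)  # spectrum scaled by allele count
    ax = allel.plot_sfs_scaled(s, n=20)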
976
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_folded_scaled
def plot_sfs_folded_scaled(*args, **kwargs): """Plot a folded scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn. """ kwargs.setdefault('yscale', 'linear') ax = plot_sfs_folded(*args, **kwargs) ax.set_ylabel('scaled site frequency') n = kwargs.get('n', None) if n: ax.set_xlabel('minor allele frequency') else: ax.set_xlabel('minor allele count') return ax
python
def plot_sfs_folded_scaled(*args, **kwargs): kwargs.setdefault('yscale', 'linear') ax = plot_sfs_folded(*args, **kwargs) ax.set_ylabel('scaled site frequency') n = kwargs.get('n', None) if n: ax.set_xlabel('minor allele frequency') else: ax.set_xlabel('minor allele count') return ax
[ "def", "plot_sfs_folded_scaled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'yscale'", ",", "'linear'", ")", "ax", "=", "plot_sfs_folded", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ax", ".", "set_ylabel", "(", "'scaled site frequency'", ")", "n", "=", "kwargs", ".", "get", "(", "'n'", ",", "None", ")", "if", "n", ":", "ax", ".", "set_xlabel", "(", "'minor allele frequency'", ")", "else", ":", "ax", ".", "set_xlabel", "(", "'minor allele count'", ")", "return", "ax" ]
Plot a folded scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "folded", "scaled", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L638-L675
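Likewise for the folded scaled wrapper, pairing it with allel.sfs_folded_scaled (sketch, same assumptions):

    import numpy as np
    import allel

    n = 20
    a = np.random.randint(0, n + 1, size=1000)  # hypothetical alternate allele counts
    ac = np.column_stack([n - a, a])
    s = allel.sfs_folded_scaled(ac, n=n)  # folded spectrum, scaled
    ax = allel.plot_sfs_folded_scaled(s, n=n)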
977
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs_scaled
def plot_joint_sfs_scaled(*args, **kwargs): """Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs(*args, **kwargs) return ax
python
def plot_joint_sfs_scaled(*args, **kwargs): imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs(*args, **kwargs) return ax
[ "def", "plot_joint_sfs_scaled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "imshow_kwargs", "=", "kwargs", ".", "get", "(", "'imshow_kwargs'", ",", "dict", "(", ")", ")", "imshow_kwargs", ".", "setdefault", "(", "'norm'", ",", "None", ")", "kwargs", "[", "'imshow_kwargs'", "]", "=", "imshow_kwargs", "ax", "=", "plot_joint_sfs", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ax" ]
Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "scaled", "joint", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L753-L775
978
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs_folded_scaled
def plot_joint_sfs_folded_scaled(*args, **kwargs): """Plot a scaled folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs_folded(*args, **kwargs) ax.set_xlabel('minor allele count (population 1)') ax.set_ylabel('minor allele count (population 2)') return ax
python
def plot_joint_sfs_folded_scaled(*args, **kwargs): imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs_folded(*args, **kwargs) ax.set_xlabel('minor allele count (population 1)') ax.set_ylabel('minor allele count (population 2)') return ax
[ "def", "plot_joint_sfs_folded_scaled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "imshow_kwargs", "=", "kwargs", ".", "get", "(", "'imshow_kwargs'", ",", "dict", "(", ")", ")", "imshow_kwargs", ".", "setdefault", "(", "'norm'", ",", "None", ")", "kwargs", "[", "'imshow_kwargs'", "]", "=", "imshow_kwargs", "ax", "=", "plot_joint_sfs_folded", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ax", ".", "set_xlabel", "(", "'minor allele count (population 1)'", ")", "ax", ".", "set_ylabel", "(", "'minor allele count (population 2)'", ")", "return", "ax" ]
Plot a scaled folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "scaled", "folded", "joint", "site", "frequency", "spectrum", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L779-L803
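The two joint-spectrum wrappers above take a 2-D spectrum; pairing one with the companion allel.joint_sfs_scaled computation looks like this (sketch; the companion function name is assumed to be exported alongside the plotting functions):

    import numpy as np
    import allel

    # hypothetical derived allele counts in two populations
    dac1 = np.random.randint(0, 11, size=1000)
    dac2 = np.random.randint(0, 11, size=1000)
    s = allel.joint_sfs_scaled(dac1, dac2)  # 2-D scaled joint spectrum
    ax = allel.plot_joint_sfs_scaled(s)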
979
cggh/scikit-allel
allel/io/vcf_read.py
_prep_fields_param
def _prep_fields_param(fields): """Prepare the `fields` parameter, and determine whether or not to store samples.""" store_samples = False if fields is None: # add samples by default return True, None if isinstance(fields, str): fields = [fields] else: fields = list(fields) if 'samples' in fields: fields.remove('samples') store_samples = True elif '*' in fields: store_samples = True return store_samples, fields
python
def _prep_fields_param(fields): store_samples = False if fields is None: # add samples by default return True, None if isinstance(fields, str): fields = [fields] else: fields = list(fields) if 'samples' in fields: fields.remove('samples') store_samples = True elif '*' in fields: store_samples = True return store_samples, fields
[ "def", "_prep_fields_param", "(", "fields", ")", ":", "store_samples", "=", "False", "if", "fields", "is", "None", ":", "# add samples by default", "return", "True", ",", "None", "if", "isinstance", "(", "fields", ",", "str", ")", ":", "fields", "=", "[", "fields", "]", "else", ":", "fields", "=", "list", "(", "fields", ")", "if", "'samples'", "in", "fields", ":", "fields", ".", "remove", "(", "'samples'", ")", "store_samples", "=", "True", "elif", "'*'", "in", "fields", ":", "store_samples", "=", "True", "return", "store_samples", ",", "fields" ]
Prepare the `fields` parameter, and determine whether or not to store samples.
[ "Prepare", "the", "fields", "parameter", "and", "determine", "whether", "or", "not", "to", "store", "samples", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L51-L71
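The helper's behaviour is easiest to see from a few direct calls (the function is module-private, so the import below is for illustration only):

    from allel.io.vcf_read import _prep_fields_param

    print(_prep_fields_param(None))               # (True, None): samples stored by default
    print(_prep_fields_param('DP'))               # (False, ['DP']): single string is wrapped
    print(_prep_fields_param(['samples', 'DP']))  # (True, ['DP']): 'samples' is split out
    print(_prep_fields_param(['*']))              # (True, ['*']): wildcard implies samples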
980
cggh/scikit-allel
allel/io/vcf_read.py
_chunk_iter_progress
def _chunk_iter_progress(it, log, prefix): """Wrap a chunk iterator for progress logging.""" n_variants = 0 before_all = time.time() before_chunk = before_all for chunk, chunk_length, chrom, pos in it: after_chunk = time.time() elapsed_chunk = after_chunk - before_chunk elapsed = after_chunk - before_all n_variants += chunk_length chrom = text_type(chrom, 'utf8') message = ( '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' % (prefix, n_variants, elapsed, elapsed_chunk, int(chunk_length // elapsed_chunk)) ) if chrom: message += '; %s:%s' % (chrom, pos) print(message, file=log) log.flush() yield chunk, chunk_length, chrom, pos before_chunk = after_chunk after_all = time.time() elapsed = after_all - before_all print('%s all done (%s rows/s)' % (prefix, int(n_variants // elapsed)), file=log) log.flush()
python
def _chunk_iter_progress(it, log, prefix): n_variants = 0 before_all = time.time() before_chunk = before_all for chunk, chunk_length, chrom, pos in it: after_chunk = time.time() elapsed_chunk = after_chunk - before_chunk elapsed = after_chunk - before_all n_variants += chunk_length chrom = text_type(chrom, 'utf8') message = ( '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' % (prefix, n_variants, elapsed, elapsed_chunk, int(chunk_length // elapsed_chunk)) ) if chrom: message += '; %s:%s' % (chrom, pos) print(message, file=log) log.flush() yield chunk, chunk_length, chrom, pos before_chunk = after_chunk after_all = time.time() elapsed = after_all - before_all print('%s all done (%s rows/s)' % (prefix, int(n_variants // elapsed)), file=log) log.flush()
[ "def", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", ")", ":", "n_variants", "=", "0", "before_all", "=", "time", ".", "time", "(", ")", "before_chunk", "=", "before_all", "for", "chunk", ",", "chunk_length", ",", "chrom", ",", "pos", "in", "it", ":", "after_chunk", "=", "time", ".", "time", "(", ")", "elapsed_chunk", "=", "after_chunk", "-", "before_chunk", "elapsed", "=", "after_chunk", "-", "before_all", "n_variants", "+=", "chunk_length", "chrom", "=", "text_type", "(", "chrom", ",", "'utf8'", ")", "message", "=", "(", "'%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)'", "%", "(", "prefix", ",", "n_variants", ",", "elapsed", ",", "elapsed_chunk", ",", "int", "(", "chunk_length", "//", "elapsed_chunk", ")", ")", ")", "if", "chrom", ":", "message", "+=", "'; %s:%s'", "%", "(", "chrom", ",", "pos", ")", "print", "(", "message", ",", "file", "=", "log", ")", "log", ".", "flush", "(", ")", "yield", "chunk", ",", "chunk_length", ",", "chrom", ",", "pos", "before_chunk", "=", "after_chunk", "after_all", "=", "time", ".", "time", "(", ")", "elapsed", "=", "after_all", "-", "before_all", "print", "(", "'%s all done (%s rows/s)'", "%", "(", "prefix", ",", "int", "(", "n_variants", "//", "elapsed", ")", ")", ",", "file", "=", "log", ")", "log", ".", "flush", "(", ")" ]
Wrap a chunk iterator for progress logging.
[ "Wrap", "a", "chunk", "iterator", "for", "progress", "logging", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L74-L100
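User code does not normally call this wrapper directly; it is engaged by passing a file-like object as the log argument of the public readers, e.g. (sketch, with 'example.vcf' standing in for a real path):

    import sys
    import allel

    # progress lines in the '[read_vcf] ... rows in ...s' style are written to stdout
    callset = allel.read_vcf('example.vcf', log=sys.stdout)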
981
cggh/scikit-allel
allel/io/vcf_read.py
read_vcf
def read_vcf(input, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into NumPy arrays. .. versionchanged:: 1.12.0 Now returns None if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string or file-like {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- data : dict[str, ndarray] A dictionary holding arrays, or None if no variants were found. """ # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup fields, samples, headers, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[read_vcf]') # read all chunks into a list chunks = [d[0] for d in it] if chunks: # setup output output = dict() if len(samples) > 0 and store_samples: output['samples'] = samples # find array keys keys = sorted(chunks[0].keys()) # concatenate chunks for k in keys: output[k] = np.concatenate([chunk[k] for chunk in chunks], axis=0) else: output = None return output
python
def read_vcf(input, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup fields, samples, headers, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[read_vcf]') # read all chunks into a list chunks = [d[0] for d in it] if chunks: # setup output output = dict() if len(samples) > 0 and store_samples: output['samples'] = samples # find array keys keys = sorted(chunks[0].keys()) # concatenate chunks for k in keys: output[k] = np.concatenate([chunk[k] for chunk in chunks], axis=0) else: output = None return output
[ "def", "read_vcf", "(", "input", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "rename_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "samples", "=", "None", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "log", "=", "None", ")", ":", "# samples requested?", "# noinspection PyTypeChecker", "store_samples", ",", "fields", "=", "_prep_fields_param", "(", "fields", ")", "# setup", "fields", ",", "samples", ",", "headers", ",", "it", "=", "iter_vcf_chunks", "(", "input", "=", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "samples", ",", "transformers", "=", "transformers", ")", "# handle field renaming", "if", "rename_fields", ":", "rename_fields", ",", "it", "=", "_do_rename", "(", "it", ",", "fields", "=", "fields", ",", "rename_fields", "=", "rename_fields", ",", "headers", "=", "headers", ")", "# setup progress logging", "if", "log", "is", "not", "None", ":", "it", "=", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", "=", "'[read_vcf]'", ")", "# read all chunks into a list", "chunks", "=", "[", "d", "[", "0", "]", "for", "d", "in", "it", "]", "if", "chunks", ":", "# setup output", "output", "=", "dict", "(", ")", "if", "len", "(", "samples", ")", ">", "0", "and", "store_samples", ":", "output", "[", "'samples'", "]", "=", "samples", "# find array keys", "keys", "=", "sorted", "(", "chunks", "[", "0", "]", ".", "keys", "(", ")", ")", "# concatenate chunks", "for", "k", "in", "keys", ":", "output", "[", "k", "]", "=", "np", ".", "concatenate", "(", "[", "chunk", "[", "k", "]", "for", "chunk", "in", "chunks", "]", ",", "axis", "=", "0", ")", "else", ":", "output", "=", "None", "return", "output" ]
Read data from a VCF file into NumPy arrays. .. versionchanged:: 1.12.0 Now returns None if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string or file-like {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- data : dict[str, ndarray] A dictionary holding arrays, or None if no variants were found.
[ "Read", "data", "from", "a", "VCF", "file", "into", "NumPy", "arrays", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L240-L345
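A typical call selecting a few fields and a genomic region might look as follows (sketch; 'example.vcf.gz' is a hypothetical path, and region queries assume a tabix index is available):

    import allel

    callset = allel.read_vcf(
        'example.vcf.gz',  # hypothetical bgzipped, tabix-indexed VCF
        fields=['variants/POS', 'calldata/GT'],
        region='chr1:1-1000000',
    )
    if callset is not None:  # None is returned if no variants matched
        gt = allel.GenotypeArray(callset['calldata/GT'])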
982
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_npz
def vcf_to_npz(input, output, compressed=True, overwrite=False, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix=True, samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into NumPy arrays and save as a .npz file. .. versionchanged:: 1.12.0 Now will not create any output file if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} compressed : bool, optional If True (default), save with compression. overwrite : bool, optional {overwrite} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} """ # guard condition if not overwrite and os.path.exists(output): raise ValueError('file exists at path %r; use overwrite=True to replace' % output) # read all data into memory data = read_vcf( input=input, fields=fields, exclude_fields=exclude_fields, rename_fields=rename_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, log=log, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) if data is None: # no data, bail out return # setup save function if compressed: savez = np.savez_compressed else: savez = np.savez # save as npz savez(output, **data)
python
def vcf_to_npz(input, output, compressed=True, overwrite=False, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix=True, samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): # guard condition if not overwrite and os.path.exists(output): raise ValueError('file exists at path %r; use overwrite=True to replace' % output) # read all data into memory data = read_vcf( input=input, fields=fields, exclude_fields=exclude_fields, rename_fields=rename_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, log=log, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) if data is None: # no data, bail out return # setup save function if compressed: savez = np.savez_compressed else: savez = np.savez # save as npz savez(output, **data)
[ "def", "vcf_to_npz", "(", "input", ",", "output", ",", "compressed", "=", "True", ",", "overwrite", "=", "False", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "rename_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "True", ",", "samples", "=", "None", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "log", "=", "None", ")", ":", "# guard condition", "if", "not", "overwrite", "and", "os", ".", "path", ".", "exists", "(", "output", ")", ":", "raise", "ValueError", "(", "'file exists at path %r; use overwrite=True to replace'", "%", "output", ")", "# read all data into memory", "data", "=", "read_vcf", "(", "input", "=", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "rename_fields", "=", "rename_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "log", "=", "log", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "samples", ",", "transformers", "=", "transformers", ")", "if", "data", "is", "None", ":", "# no data, bail out", "return", "# setup save function", "if", "compressed", ":", "savez", "=", "np", ".", "savez_compressed", "else", ":", "savez", "=", "np", ".", "savez", "# save as npz", "savez", "(", "output", ",", "*", "*", "data", ")" ]
Read data from a VCF file into NumPy arrays and save as a .npz file. .. versionchanged:: 1.12.0 Now will not create any output file if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} compressed : bool, optional If True (default), save with compression. overwrite : bool, optional {overwrite} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log}
[ "Read", "data", "from", "a", "VCF", "file", "into", "NumPy", "arrays", "and", "save", "as", "a", ".", "npz", "file", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L375-L463
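Usage mirrors read_vcf, with the result round-tripped through NumPy's .npz format (sketch, hypothetical paths):

    import numpy as np
    import allel

    allel.vcf_to_npz('example.vcf', 'example.npz', fields='*', overwrite=True)
    callset = np.load('example.npz', allow_pickle=True)  # object-dtype string arrays may need allow_pickle
    pos = callset['variants/POS']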
983
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_hdf5
def vcf_to_hdf5(input, output, group='/', compression='gzip', compression_opts=1, shuffle=False, overwrite=False, vlen=True, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, chunk_width=DEFAULT_CHUNK_WIDTH, log=None): """Read data from a VCF file and load into an HDF5 file. .. versionchanged:: 1.12.0 Now will not create any output file if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} group : string Group within destination HDF5 file to store data in. compression : string Compression algorithm, e.g., 'gzip' (default). compression_opts : int Compression level, e.g., 1 (default). shuffle : bool Use byte shuffling, which may improve compression (default is False). overwrite : bool {overwrite} vlen : bool If True, store variable length strings. Note that there is considerable storage overhead for variable length strings in HDF5, and leaving this option as True ( default) may lead to large file sizes. If False, all strings will be stored in the HDF5 file as fixed length strings, even if they are specified as 'object' type. In this case, the string length for any field with 'object' type will be determined based on the maximum length of strings found in the first chunk, and this may cause values to be truncated if longer values are found in later chunks. To avoid truncation and large file sizes, manually set the type for all string fields to an explicit fixed length string type, e.g., 'S10' for a field where you know at most 10 characters are required. fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} chunk_width : int, optional {chunk_width} log : file-like, optional {log} """ import h5py # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup chunk iterator fields, samples, headers, it = iter_vcf_chunks( input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_hdf5]') # read first chunk try: chunk, _, _, _ = next(it) except StopIteration: # no data, bail out return with h5py.File(output, mode='a') as h5f: # obtain root group that data will be stored into root = h5f.require_group(group) if len(samples) > 0 and store_samples: # store samples name = 'samples' if name in root: if overwrite: del root[name] else: raise ValueError( 'dataset exists at path %r; use overwrite=True to replace' % name) if samples.dtype.kind == 'O': if vlen: t = h5py.special_dtype(vlen=str) else: samples = samples.astype('S') t = samples.dtype else: t = samples.dtype root.create_dataset(name, data=samples, chunks=None, dtype=t) # setup datasets # noinspection PyTypeChecker keys = _hdf5_setup_datasets( chunk=chunk, root=root, chunk_length=chunk_length, chunk_width=chunk_width, compression=compression, compression_opts=compression_opts, shuffle=shuffle, overwrite=overwrite, headers=headers, vlen=vlen ) # store first chunk _hdf5_store_chunk(root, keys, chunk, vlen) # store remaining chunks for chunk, _, _, _ in it: _hdf5_store_chunk(root, keys, chunk, vlen)
python
def vcf_to_hdf5(input, output, group='/', compression='gzip', compression_opts=1, shuffle=False, overwrite=False, vlen=True, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, chunk_width=DEFAULT_CHUNK_WIDTH, log=None): import h5py # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup chunk iterator fields, samples, headers, it = iter_vcf_chunks( input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_hdf5]') # read first chunk try: chunk, _, _, _ = next(it) except StopIteration: # no data, bail out return with h5py.File(output, mode='a') as h5f: # obtain root group that data will be stored into root = h5f.require_group(group) if len(samples) > 0 and store_samples: # store samples name = 'samples' if name in root: if overwrite: del root[name] else: raise ValueError( 'dataset exists at path %r; use overwrite=True to replace' % name) if samples.dtype.kind == 'O': if vlen: t = h5py.special_dtype(vlen=str) else: samples = samples.astype('S') t = samples.dtype else: t = samples.dtype root.create_dataset(name, data=samples, chunks=None, dtype=t) # setup datasets # noinspection PyTypeChecker keys = _hdf5_setup_datasets( chunk=chunk, root=root, chunk_length=chunk_length, chunk_width=chunk_width, compression=compression, compression_opts=compression_opts, shuffle=shuffle, overwrite=overwrite, headers=headers, vlen=vlen ) # store first chunk _hdf5_store_chunk(root, keys, chunk, vlen) # store remaining chunks for chunk, _, _, _ in it: _hdf5_store_chunk(root, keys, chunk, vlen)
[ "def", "vcf_to_hdf5", "(", "input", ",", "output", ",", "group", "=", "'/'", ",", "compression", "=", "'gzip'", ",", "compression_opts", "=", "1", ",", "shuffle", "=", "False", ",", "overwrite", "=", "False", ",", "vlen", "=", "True", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "rename_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "samples", "=", "None", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "chunk_width", "=", "DEFAULT_CHUNK_WIDTH", ",", "log", "=", "None", ")", ":", "import", "h5py", "# samples requested?", "# noinspection PyTypeChecker", "store_samples", ",", "fields", "=", "_prep_fields_param", "(", "fields", ")", "# setup chunk iterator", "fields", ",", "samples", ",", "headers", ",", "it", "=", "iter_vcf_chunks", "(", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "samples", ",", "transformers", "=", "transformers", ")", "# handle field renaming", "if", "rename_fields", ":", "rename_fields", ",", "it", "=", "_do_rename", "(", "it", ",", "fields", "=", "fields", ",", "rename_fields", "=", "rename_fields", ",", "headers", "=", "headers", ")", "# setup progress logging", "if", "log", "is", "not", "None", ":", "it", "=", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", "=", "'[vcf_to_hdf5]'", ")", "# read first chunk", "try", ":", "chunk", ",", "_", ",", "_", ",", "_", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "# no data, bail out", "return", "with", "h5py", ".", "File", "(", "output", ",", "mode", "=", "'a'", ")", "as", "h5f", ":", "# obtain root group that data will be stored into", "root", "=", "h5f", ".", "require_group", "(", "group", ")", "if", "len", "(", "samples", ")", ">", "0", "and", "store_samples", ":", "# store samples", "name", "=", "'samples'", "if", "name", "in", "root", ":", "if", "overwrite", ":", "del", "root", "[", "name", "]", "else", ":", "raise", "ValueError", "(", "'dataset exists at path %r; use overwrite=True to replace'", "%", "name", ")", "if", "samples", ".", "dtype", ".", "kind", "==", "'O'", ":", "if", "vlen", ":", "t", "=", "h5py", ".", "special_dtype", "(", "vlen", "=", "str", ")", "else", ":", "samples", "=", "samples", ".", "astype", "(", "'S'", ")", "t", "=", "samples", ".", "dtype", "else", ":", "t", "=", "samples", ".", "dtype", "root", ".", "create_dataset", "(", "name", ",", "data", "=", "samples", ",", "chunks", "=", "None", ",", "dtype", "=", "t", ")", "# setup datasets", "# noinspection PyTypeChecker", "keys", "=", "_hdf5_setup_datasets", "(", "chunk", "=", "chunk", ",", "root", "=", "root", ",", "chunk_length", "=", "chunk_length", ",", "chunk_width", "=", "chunk_width", ",", "compression", "=", "compression", ",", "compression_opts", "=", "compression_opts", ",", "shuffle", "=", "shuffle", ",", "overwrite", "=", "overwrite", ",", "headers", "=", "headers", ",", "vlen", "=", "vlen", ")", "# store first chunk", "_hdf5_store_chunk", "(", "root", ",", "keys", ",", "chunk", ",", "vlen", ")", "# store remaining chunks", "for", "chunk", ",", "_", ",", "_", ",", "_", "in", "it", ":", "_hdf5_store_chunk", "(", "root", ",", "keys", ",", "chunk", ",", "vlen", ")" ]
Read data from a VCF file and load into an HDF5 file. .. versionchanged:: 1.12.0 Now will not create any output file if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} group : string Group within destination HDF5 file to store data in. compression : string Compression algorithm, e.g., 'gzip' (default). compression_opts : int Compression level, e.g., 1 (default). shuffle : bool Use byte shuffling, which may improve compression (default is False). overwrite : bool {overwrite} vlen : bool If True, store variable length strings. Note that there is considerable storage overhead for variable length strings in HDF5, and leaving this option as True ( default) may lead to large file sizes. If False, all strings will be stored in the HDF5 file as fixed length strings, even if they are specified as 'object' type. In this case, the string length for any field with 'object' type will be determined based on the maximum length of strings found in the first chunk, and this may cause values to be truncated if longer values are found in later chunks. To avoid truncation and large file sizes, manually set the type for all string fields to an explicit fixed length string type, e.g., 'S10' for a field where you know at most 10 characters are required. fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} chunk_width : int, optional {chunk_width} log : file-like, optional {log}
[ "Read", "data", "from", "a", "VCF", "file", "and", "load", "into", "an", "HDF5", "file", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L600-L757
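A sketch of converting a VCF to HDF5 and reading one array back (hypothetical paths; requires h5py):

    import h5py
    import allel

    allel.vcf_to_hdf5('example.vcf', 'example.h5', fields='*', overwrite=True)
    with h5py.File('example.h5', mode='r') as h5f:
        gt = h5f['calldata/GT'][:]  # genotype calls as a NumPy array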
984
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_zarr
def vcf_to_zarr(input, output, group='/', compressor='default', overwrite=False, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, chunk_width=DEFAULT_CHUNK_WIDTH, log=None): """Read data from a VCF file and load into a Zarr on-disk store. .. versionchanged:: 1.12.0 Now will not create any output files if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} group : string Group within destination Zarr hierarchy to store data in. compressor : compressor Compression algorithm, e.g., zarr.Blosc(cname='zstd', clevel=1, shuffle=1). overwrite : bool {overwrite} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} chunk_width : int, optional {chunk_width} log : file-like, optional {log} """ import zarr # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup chunk iterator fields, samples, headers, it = iter_vcf_chunks( input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # check for any case-insensitive duplicate fields # https://github.com/cggh/scikit-allel/issues/215 ci_field_index = defaultdict(list) for f in fields: if rename_fields: f = rename_fields.get(f, f) ci_field_index[f.lower()].append(f) for k, v in ci_field_index.items(): if len(v) > 1: msg = textwrap.fill( 'Found two or more fields with the same name when compared ' 'case-insensitive: {!r}; this is not supported because it causes ' 'problems on platforms with a case-insensitive file system, which is ' 'usually the default on Windows and Mac OS. Please rename fields so they ' 'are distinct under a case-insensitive comparison via the ' 'rename_fields argument.'.format(v), width=80) raise ValueError(msg) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_zarr]') # read first chunk try: chunk, _, _, _ = next(it) except StopIteration: # no data, bail out return # open root group root = zarr.open_group(output, mode='a', path=group) if len(samples) > 0 and store_samples: # store samples if samples.dtype.kind == 'O': if PY2: dtype = 'unicode' else: dtype = 'str' else: dtype = samples.dtype root.create_dataset('samples', data=samples, compressor=None, overwrite=overwrite, dtype=dtype) # setup datasets # noinspection PyTypeChecker keys = _zarr_setup_datasets( chunk, root=root, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, overwrite=overwrite, headers=headers ) # store first chunk _zarr_store_chunk(root, keys, chunk) # store remaining chunks for chunk, _, _, _ in it: _zarr_store_chunk(root, keys, chunk)
python
def vcf_to_zarr(input, output, group='/', compressor='default', overwrite=False, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, chunk_width=DEFAULT_CHUNK_WIDTH, log=None): import zarr # samples requested? # noinspection PyTypeChecker store_samples, fields = _prep_fields_param(fields) # setup chunk iterator fields, samples, headers, it = iter_vcf_chunks( input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=samples, transformers=transformers ) # handle field renaming if rename_fields: rename_fields, it = _do_rename(it, fields=fields, rename_fields=rename_fields, headers=headers) # check for any case-insensitive duplicate fields # https://github.com/cggh/scikit-allel/issues/215 ci_field_index = defaultdict(list) for f in fields: if rename_fields: f = rename_fields.get(f, f) ci_field_index[f.lower()].append(f) for k, v in ci_field_index.items(): if len(v) > 1: msg = textwrap.fill( 'Found two or more fields with the same name when compared ' 'case-insensitive: {!r}; this is not supported because it causes ' 'problems on platforms with a case-insensitive file system, which is ' 'usually the default on Windows and Mac OS. Please rename fields so they ' 'are distinct under a case-insensitive comparison via the ' 'rename_fields argument.'.format(v), width=80) raise ValueError(msg) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_zarr]') # read first chunk try: chunk, _, _, _ = next(it) except StopIteration: # no data, bail out return # open root group root = zarr.open_group(output, mode='a', path=group) if len(samples) > 0 and store_samples: # store samples if samples.dtype.kind == 'O': if PY2: dtype = 'unicode' else: dtype = 'str' else: dtype = samples.dtype root.create_dataset('samples', data=samples, compressor=None, overwrite=overwrite, dtype=dtype) # setup datasets # noinspection PyTypeChecker keys = _zarr_setup_datasets( chunk, root=root, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, overwrite=overwrite, headers=headers ) # store first chunk _zarr_store_chunk(root, keys, chunk) # store remaining chunks for chunk, _, _, _ in it: _zarr_store_chunk(root, keys, chunk)
[ "def", "vcf_to_zarr", "(", "input", ",", "output", ",", "group", "=", "'/'", ",", "compressor", "=", "'default'", ",", "overwrite", "=", "False", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "rename_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "samples", "=", "None", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "chunk_width", "=", "DEFAULT_CHUNK_WIDTH", ",", "log", "=", "None", ")", ":", "import", "zarr", "# samples requested?", "# noinspection PyTypeChecker", "store_samples", ",", "fields", "=", "_prep_fields_param", "(", "fields", ")", "# setup chunk iterator", "fields", ",", "samples", ",", "headers", ",", "it", "=", "iter_vcf_chunks", "(", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "samples", ",", "transformers", "=", "transformers", ")", "# handle field renaming", "if", "rename_fields", ":", "rename_fields", ",", "it", "=", "_do_rename", "(", "it", ",", "fields", "=", "fields", ",", "rename_fields", "=", "rename_fields", ",", "headers", "=", "headers", ")", "# check for any case-insensitive duplicate fields", "# https://github.com/cggh/scikit-allel/issues/215", "ci_field_index", "=", "defaultdict", "(", "list", ")", "for", "f", "in", "fields", ":", "if", "rename_fields", ":", "f", "=", "rename_fields", ".", "get", "(", "f", ",", "f", ")", "ci_field_index", "[", "f", ".", "lower", "(", ")", "]", ".", "append", "(", "f", ")", "for", "k", ",", "v", "in", "ci_field_index", ".", "items", "(", ")", ":", "if", "len", "(", "v", ")", ">", "1", ":", "msg", "=", "textwrap", ".", "fill", "(", "'Found two or more fields with the same name when compared '", "'case-insensitive: {!r}; this is not supported because it causes '", "'problems on platforms with a case-insensitive file system, which is '", "'usually the default on Windows and Mac OS. Please rename fields so they '", "'are distinct under a case-insensitive comparison via the '", "'rename_fields argument.'", ".", "format", "(", "v", ")", ",", "width", "=", "80", ")", "raise", "ValueError", "(", "msg", ")", "# setup progress logging", "if", "log", "is", "not", "None", ":", "it", "=", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", "=", "'[vcf_to_zarr]'", ")", "# read first chunk", "try", ":", "chunk", ",", "_", ",", "_", ",", "_", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "# no data, bail out", "return", "# open root group", "root", "=", "zarr", ".", "open_group", "(", "output", ",", "mode", "=", "'a'", ",", "path", "=", "group", ")", "if", "len", "(", "samples", ")", ">", "0", "and", "store_samples", ":", "# store samples", "if", "samples", ".", "dtype", ".", "kind", "==", "'O'", ":", "if", "PY2", ":", "dtype", "=", "'unicode'", "else", ":", "dtype", "=", "'str'", "else", ":", "dtype", "=", "samples", ".", "dtype", "root", ".", "create_dataset", "(", "'samples'", ",", "data", "=", "samples", ",", "compressor", "=", "None", ",", "overwrite", "=", "overwrite", ",", "dtype", "=", "dtype", ")", "# setup datasets", "# noinspection PyTypeChecker", "keys", "=", "_zarr_setup_datasets", "(", "chunk", ",", "root", "=", "root", ",", "chunk_length", "=", "chunk_length", ",", "chunk_width", "=", "chunk_width", ",", "compressor", "=", "compressor", ",", "overwrite", "=", "overwrite", ",", "headers", "=", "headers", ")", "# store first chunk", "_zarr_store_chunk", "(", "root", ",", "keys", ",", "chunk", ")", "# store remaining chunks", "for", "chunk", ",", "_", ",", "_", ",", "_", "in", "it", ":", "_zarr_store_chunk", "(", "root", ",", "keys", ",", "chunk", ")" ]
Read data from a VCF file and load into a Zarr on-disk store. .. versionchanged:: 1.12.0 Now will not create any output files if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} group : string Group within destination Zarr hierarchy to store data in. compressor : compressor Compression algorithm, e.g., zarr.Blosc(cname='zstd', clevel=1, shuffle=1). overwrite : bool {overwrite} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} chunk_width : int, optional {chunk_width} log : file-like, optional {log}
[ "Read", "data", "from", "a", "VCF", "file", "and", "load", "into", "a", "Zarr", "on", "-", "disk", "store", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L846-L993
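And the Zarr counterpart, with the Blosc compressor from the docstring as one possible choice (sketch, hypothetical paths; requires zarr):

    import zarr
    import allel

    allel.vcf_to_zarr('example.vcf', 'example.zarr', fields='*', overwrite=True,
                      compressor=zarr.Blosc(cname='zstd', clevel=1, shuffle=1))
    root = zarr.open_group('example.zarr', mode='r')
    gt = root['calldata/GT'][:]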
985
cggh/scikit-allel
allel/io/vcf_read.py
iter_vcf_chunks
def iter_vcf_chunks(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH): """Iterate over chunks of data from a VCF file as NumPy arrays. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} Returns ------- fields : list of strings Normalised names of fields that will be extracted. samples : ndarray Samples for which data will be extracted. headers : VCFHeaders Tuple of metadata extracted from VCF headers. it : iterator Chunk iterator. """ # setup common keyword args kwds = dict(fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, chunk_length=chunk_length, fills=fills, samples=samples, region=region) # setup input stream stream = _setup_input_stream(input=input, region=region, tabix=tabix, buffer_size=buffer_size) # setup iterator fields, samples, headers, it = _iter_vcf_stream(stream, **kwds) # setup transformers if transformers is not None: # API flexibility if not isinstance(transformers, (list, tuple)): transformers = [transformers] for trans in transformers: fields = trans.transform_fields(fields) it = _chunk_iter_transform(it, transformers) return fields, samples, headers, it
python
def iter_vcf_chunks(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH): # setup common keyword args kwds = dict(fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, chunk_length=chunk_length, fills=fills, samples=samples, region=region) # setup input stream stream = _setup_input_stream(input=input, region=region, tabix=tabix, buffer_size=buffer_size) # setup iterator fields, samples, headers, it = _iter_vcf_stream(stream, **kwds) # setup transformers if transformers is not None: # API flexibility if not isinstance(transformers, (list, tuple)): transformers = [transformers] for trans in transformers: fields = trans.transform_fields(fields) it = _chunk_iter_transform(it, transformers) return fields, samples, headers, it
[ "def", "iter_vcf_chunks", "(", "input", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "samples", "=", "None", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ")", ":", "# setup common keyword args", "kwds", "=", "dict", "(", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "samples", "=", "samples", ",", "region", "=", "region", ")", "# setup input stream", "stream", "=", "_setup_input_stream", "(", "input", "=", "input", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "buffer_size", "=", "buffer_size", ")", "# setup iterator", "fields", ",", "samples", ",", "headers", ",", "it", "=", "_iter_vcf_stream", "(", "stream", ",", "*", "*", "kwds", ")", "# setup transformers", "if", "transformers", "is", "not", "None", ":", "# API flexibility", "if", "not", "isinstance", "(", "transformers", ",", "(", "list", ",", "tuple", ")", ")", ":", "transformers", "=", "[", "transformers", "]", "for", "trans", "in", "transformers", ":", "fields", "=", "trans", ".", "transform_fields", "(", "fields", ")", "it", "=", "_chunk_iter_transform", "(", "it", ",", "transformers", ")", "return", "fields", ",", "samples", ",", "headers", ",", "it" ]
Iterate over chunks of data from a VCF file as NumPy arrays. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} Returns ------- fields : list of strings Normalised names of fields that will be extracted. samples : ndarray Samples for which data will be extracted. headers : VCFHeaders Tuple of metadata extracted from VCF headers. it : iterator Chunk iterator.
[ "Iterate", "over", "chunks", "of", "data", "from", "a", "VCF", "file", "as", "NumPy", "arrays", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L1080-L1158
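A minimal usage sketch for the chunk iterator above, assuming a hypothetical local file 'sample.vcf'; the requested field names are illustrative, and the 4-tuple unpacking of each item is inferred from the d[0] indexing used by the caller records below.

import allel

# 'sample.vcf' is a hypothetical input path, not part of this record
fields, samples, headers, it = allel.iter_vcf_chunks(
    'sample.vcf', fields=['variants/POS', 'calldata/GT'])
for chunk, chunk_length, chrom, pos in it:
    # each chunk maps normalised field names to NumPy arrays
    print(chrom, chunk['variants/POS'][:3])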
986
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_dataframe
def vcf_to_dataframe(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into a pandas DataFrame. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- df : pandas.DataFrame """ import pandas # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_dataframe]') # read all chunks into a list chunks = [d[0] for d in it] # setup output output = None if chunks: # concatenate chunks output = pandas.concat([_chunk_to_dataframe(fields, chunk) for chunk in chunks]) return output
python
def vcf_to_dataframe(input, fields=None, exclude_fields=None, types=None,
                     numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None,
                     region=None, tabix='tabix', transformers=None,
                     buffer_size=DEFAULT_BUFFER_SIZE,
                     chunk_length=DEFAULT_CHUNK_LENGTH, log=None):
    import pandas

    # samples requested?
    # noinspection PyTypeChecker
    _, fields = _prep_fields_param(fields)

    # setup
    fields, _, _, it = iter_vcf_chunks(
        input=input, fields=fields, exclude_fields=exclude_fields,
        types=types, numbers=numbers, alt_number=alt_number,
        buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
        region=region, tabix=tabix, samples=[], transformers=transformers
    )

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[vcf_to_dataframe]')

    # read all chunks into a list
    chunks = [d[0] for d in it]

    # setup output
    output = None
    if chunks:
        # concatenate chunks
        output = pandas.concat([_chunk_to_dataframe(fields, chunk)
                                for chunk in chunks])

    return output
[ "def", "vcf_to_dataframe", "(", "input", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "log", "=", "None", ")", ":", "import", "pandas", "# samples requested?", "# noinspection PyTypeChecker", "_", ",", "fields", "=", "_prep_fields_param", "(", "fields", ")", "# setup", "fields", ",", "_", ",", "_", ",", "it", "=", "iter_vcf_chunks", "(", "input", "=", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "[", "]", ",", "transformers", "=", "transformers", ")", "# setup progress logging", "if", "log", "is", "not", "None", ":", "it", "=", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", "=", "'[vcf_to_dataframe]'", ")", "# read all chunks into a list", "chunks", "=", "[", "d", "[", "0", "]", "for", "d", "in", "it", "]", "# setup output", "output", "=", "None", "if", "chunks", ":", "# concatenate chunks", "output", "=", "pandas", ".", "concat", "(", "[", "_chunk_to_dataframe", "(", "fields", ",", "chunk", ")", "for", "chunk", "in", "chunks", "]", ")", "return", "output" ]
Read data from a VCF file into a pandas DataFrame. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- df : pandas.DataFrame
[ "Read", "data", "from", "a", "VCF", "file", "into", "a", "pandas", "DataFrame", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L1801-L1881
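A brief call sketch for the function above. The path is hypothetical and 'DP' assumes the file declares such an INFO field; note from the code that None is returned when the file yields no chunks.

import sys
import allel

# hypothetical path; 'DP' assumes that INFO field exists in the VCF header
df = allel.vcf_to_dataframe('sample.vcf', fields=['CHROM', 'POS', 'DP'],
                            log=sys.stdout)
print(df.dtypes)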
987
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_recarray
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None): """Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array """ # samples requested? # noinspection PyTypeChecker _, fields = _prep_fields_param(fields) # setup chunk iterator # N.B., set samples to empty list so we don't get any calldata fields fields, _, _, it = iter_vcf_chunks( input=input, fields=fields, exclude_fields=exclude_fields, types=types, numbers=numbers, alt_number=alt_number, buffer_size=buffer_size, chunk_length=chunk_length, fills=fills, region=region, tabix=tabix, samples=[], transformers=transformers ) # setup progress logging if log is not None: it = _chunk_iter_progress(it, log, prefix='[vcf_to_recarray]') # read all chunks into a list chunks = [d[0] for d in it] # setup output output = None if chunks: # concatenate chunks output = np.concatenate([_chunk_to_recarray(fields, chunk) for chunk in chunks]) return output
python
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None,
                    numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None,
                    region=None, tabix='tabix', transformers=None,
                    buffer_size=DEFAULT_BUFFER_SIZE,
                    chunk_length=DEFAULT_CHUNK_LENGTH, log=None):
    # samples requested?
    # noinspection PyTypeChecker
    _, fields = _prep_fields_param(fields)

    # setup chunk iterator
    # N.B., set samples to empty list so we don't get any calldata fields
    fields, _, _, it = iter_vcf_chunks(
        input=input, fields=fields, exclude_fields=exclude_fields,
        types=types, numbers=numbers, alt_number=alt_number,
        buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
        region=region, tabix=tabix, samples=[], transformers=transformers
    )

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[vcf_to_recarray]')

    # read all chunks into a list
    chunks = [d[0] for d in it]

    # setup output
    output = None
    if chunks:
        # concatenate chunks
        output = np.concatenate([_chunk_to_recarray(fields, chunk)
                                 for chunk in chunks])

    return output
[ "def", "vcf_to_recarray", "(", "input", ",", "fields", "=", "None", ",", "exclude_fields", "=", "None", ",", "types", "=", "None", ",", "numbers", "=", "None", ",", "alt_number", "=", "DEFAULT_ALT_NUMBER", ",", "fills", "=", "None", ",", "region", "=", "None", ",", "tabix", "=", "'tabix'", ",", "transformers", "=", "None", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ",", "chunk_length", "=", "DEFAULT_CHUNK_LENGTH", ",", "log", "=", "None", ")", ":", "# samples requested?", "# noinspection PyTypeChecker", "_", ",", "fields", "=", "_prep_fields_param", "(", "fields", ")", "# setup chunk iterator", "# N.B., set samples to empty list so we don't get any calldata fields", "fields", ",", "_", ",", "_", ",", "it", "=", "iter_vcf_chunks", "(", "input", "=", "input", ",", "fields", "=", "fields", ",", "exclude_fields", "=", "exclude_fields", ",", "types", "=", "types", ",", "numbers", "=", "numbers", ",", "alt_number", "=", "alt_number", ",", "buffer_size", "=", "buffer_size", ",", "chunk_length", "=", "chunk_length", ",", "fills", "=", "fills", ",", "region", "=", "region", ",", "tabix", "=", "tabix", ",", "samples", "=", "[", "]", ",", "transformers", "=", "transformers", ")", "# setup progress logging", "if", "log", "is", "not", "None", ":", "it", "=", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", "=", "'[vcf_to_recarray]'", ")", "# read all chunks into a list", "chunks", "=", "[", "d", "[", "0", "]", "for", "d", "in", "it", "]", "# setup output", "output", "=", "None", "if", "chunks", ":", "# concatenate chunks", "output", "=", "np", ".", "concatenate", "(", "[", "_chunk_to_recarray", "(", "fields", ",", "chunk", ")", "for", "chunk", "in", "chunks", "]", ")", "return", "output" ]
Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array
[ "Read", "data", "from", "a", "VCF", "file", "into", "a", "NumPy", "recarray", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L2020-L2098
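The recarray variant follows the same pattern; the path is again hypothetical, and only variants/* fields appear in the result because the function passes samples=[] to iter_vcf_chunks.

import allel

ra = allel.vcf_to_recarray('sample.vcf', fields=['CHROM', 'POS', 'QUAL'])
# columns of the structured array are addressed by field name
print(ra.dtype.names, ra['POS'][:5])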
988
cggh/scikit-allel
allel/io/fasta.py
write_fasta
def write_fasta(path, sequences, names, mode='w', width=80): """Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width. """ # check inputs if isinstance(sequences, np.ndarray): # single sequence sequences = [sequences] names = [names] if len(sequences) != len(names): raise ValueError('must provide the same number of sequences and names') for sequence in sequences: if sequence.dtype != np.dtype('S1'): raise ValueError('expected S1 dtype, found %r' % sequence.dtype) # force binary mode mode = 'ab' if 'a' in mode else 'wb' # write to file with open(path, mode=mode) as fasta: for name, sequence in zip(names, sequences): # force bytes if isinstance(name, text_type): name = name.encode('ascii') header = b'>' + name + b'\n' fasta.write(header) for i in range(0, sequence.size, width): line = sequence[i:i+width].tostring() + b'\n' fasta.write(line)
python
def write_fasta(path, sequences, names, mode='w', width=80):
    # check inputs
    if isinstance(sequences, np.ndarray):
        # single sequence
        sequences = [sequences]
        names = [names]
    if len(sequences) != len(names):
        raise ValueError('must provide the same number of sequences and names')
    for sequence in sequences:
        if sequence.dtype != np.dtype('S1'):
            raise ValueError('expected S1 dtype, found %r' % sequence.dtype)

    # force binary mode
    mode = 'ab' if 'a' in mode else 'wb'

    # write to file
    with open(path, mode=mode) as fasta:
        for name, sequence in zip(names, sequences):
            # force bytes
            if isinstance(name, text_type):
                name = name.encode('ascii')
            header = b'>' + name + b'\n'
            fasta.write(header)
            for i in range(0, sequence.size, width):
                line = sequence[i:i+width].tostring() + b'\n'
                fasta.write(line)
[ "def", "write_fasta", "(", "path", ",", "sequences", ",", "names", ",", "mode", "=", "'w'", ",", "width", "=", "80", ")", ":", "# check inputs", "if", "isinstance", "(", "sequences", ",", "np", ".", "ndarray", ")", ":", "# single sequence", "sequences", "=", "[", "sequences", "]", "names", "=", "[", "names", "]", "if", "len", "(", "sequences", ")", "!=", "len", "(", "names", ")", ":", "raise", "ValueError", "(", "'must provide the same number of sequences and names'", ")", "for", "sequence", "in", "sequences", ":", "if", "sequence", ".", "dtype", "!=", "np", ".", "dtype", "(", "'S1'", ")", ":", "raise", "ValueError", "(", "'expected S1 dtype, found %r'", "%", "sequence", ".", "dtype", ")", "# force binary mode", "mode", "=", "'ab'", "if", "'a'", "in", "mode", "else", "'wb'", "# write to file", "with", "open", "(", "path", ",", "mode", "=", "mode", ")", "as", "fasta", ":", "for", "name", ",", "sequence", "in", "zip", "(", "names", ",", "sequences", ")", ":", "# force bytes", "if", "isinstance", "(", "name", ",", "text_type", ")", ":", "name", "=", "name", ".", "encode", "(", "'ascii'", ")", "header", "=", "b'>'", "+", "name", "+", "b'\\n'", "fasta", ".", "write", "(", "header", ")", "for", "i", "in", "range", "(", "0", ",", "sequence", ".", "size", ",", "width", ")", ":", "line", "=", "sequence", "[", "i", ":", "i", "+", "width", "]", ".", "tostring", "(", ")", "+", "b'\\n'", "fasta", ".", "write", "(", "line", ")" ]
Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width.
[ "Write", "nucleotide", "sequences", "stored", "as", "numpy", "arrays", "to", "a", "FASTA", "file", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/fasta.py#L11-L54
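A self-contained sketch of the writer above; the import path follows this record's module. Note that tostring() was deprecated in later NumPy releases in favour of tobytes(), so the record's code assumes a NumPy version contemporary with the pinned commit.

import numpy as np
from allel.io.fasta import write_fasta

# build a dtype-'S1' sequence, as the input checks above require
seq = np.frombuffer(b'ACGTACGTACGT', dtype='S1')
write_fasta('example.fa', seq, 'chr_demo', width=8)
# example.fa now holds '>chr_demo' with the sequence wrapped at 8 bases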
989
cggh/scikit-allel
allel/stats/hw.py
heterozygosity_observed
def heterozygosity_observed(g, fill=np.nan): """Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of observed heterozygosity, accounting for variants # where all calls are missing with ignore_invalid(): ho = np.where(n_called > 0, n_het / n_called, fill) return ho
python
def heterozygosity_observed(g, fill=np.nan):
    # check inputs
    if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
        g = GenotypeArray(g, copy=False)

    # count hets
    n_het = np.asarray(g.count_het(axis=1))
    n_called = np.asarray(g.count_called(axis=1))

    # calculate rate of observed heterozygosity, accounting for variants
    # where all calls are missing
    with ignore_invalid():
        ho = np.where(n_called > 0, n_het / n_called, fill)

    return ho
[ "def", "heterozygosity_observed", "(", "g", ",", "fill", "=", "np", ".", "nan", ")", ":", "# check inputs", "if", "not", "hasattr", "(", "g", ",", "'count_het'", ")", "or", "not", "hasattr", "(", "g", ",", "'count_called'", ")", ":", "g", "=", "GenotypeArray", "(", "g", ",", "copy", "=", "False", ")", "# count hets", "n_het", "=", "np", ".", "asarray", "(", "g", ".", "count_het", "(", "axis", "=", "1", ")", ")", "n_called", "=", "np", ".", "asarray", "(", "g", ".", "count_called", "(", "axis", "=", "1", ")", ")", "# calculate rate of observed heterozygosity, accounting for variants", "# where all calls are missing", "with", "ignore_invalid", "(", ")", ":", "ho", "=", "np", ".", "where", "(", "n_called", ">", "0", ",", "n_het", "/", "n_called", ",", "fill", ")", "return", "ho" ]
Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ])
[ "Calculate", "the", "rate", "of", "observed", "heterozygosity", "for", "each", "variant", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L12-L55
990
cggh/scikit-allel
allel/stats/hw.py
heterozygosity_expected
def heterozygosity_expected(af, ploidy, fill=np.nan): """Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ]) """ # check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out
python
def heterozygosity_expected(af, ploidy, fill=np.nan):
    # check inputs
    af = asarray_ndim(af, 2)

    # calculate expected heterozygosity
    out = 1 - np.sum(np.power(af, ploidy), axis=1)

    # fill values where allele frequencies could not be calculated
    af_sum = np.sum(af, axis=1)
    with ignore_invalid():
        out[(af_sum < 1) | np.isnan(af_sum)] = fill

    return out
[ "def", "heterozygosity_expected", "(", "af", ",", "ploidy", ",", "fill", "=", "np", ".", "nan", ")", ":", "# check inputs", "af", "=", "asarray_ndim", "(", "af", ",", "2", ")", "# calculate expected heterozygosity", "out", "=", "1", "-", "np", ".", "sum", "(", "np", ".", "power", "(", "af", ",", "ploidy", ")", ",", "axis", "=", "1", ")", "# fill values where allele frequencies could not be calculated", "af_sum", "=", "np", ".", "sum", "(", "af", ",", "axis", "=", "1", ")", "with", "ignore_invalid", "(", ")", ":", "out", "[", "(", "af_sum", "<", "1", ")", "|", "np", ".", "isnan", "(", "af_sum", ")", "]", "=", "fill", "return", "out" ]
Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ])
[ "Calculate", "the", "expected", "rate", "of", "heterozygosity", "for", "each", "variant", "under", "Hardy", "-", "Weinberg", "equilibrium", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L58-L103
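A hand-check of the expression above, 1 - sum(af ** ploidy), against the docstring example's second variant (allele frequencies 0.5 and 0.5, ploidy 2): 1 - (0.25 + 0.25) = 0.5, matching the printed output.

import numpy as np

af = np.array([[0.5, 0.5]])
print(1 - np.sum(np.power(af, 2), axis=1))  # [0.5], as in the docstring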
991
cggh/scikit-allel
allel/stats/hw.py
inbreeding_coefficient
def inbreeding_coefficient(g, fill=np.nan): """Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333]) """ # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0) # calculate inbreeding coefficient, accounting for variants with no # expected heterozygosity with ignore_invalid(): f = np.where(he > 0, 1 - (ho / he), fill) return f
python
def inbreeding_coefficient(g, fill=np.nan):
    # check inputs
    if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
        g = GenotypeArray(g, copy=False)

    # calculate observed and expected heterozygosity
    ho = heterozygosity_observed(g)
    af = g.count_alleles().to_frequencies()
    he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0)

    # calculate inbreeding coefficient, accounting for variants with no
    # expected heterozygosity
    with ignore_invalid():
        f = np.where(he > 0, 1 - (ho / he), fill)

    return f
[ "def", "inbreeding_coefficient", "(", "g", ",", "fill", "=", "np", ".", "nan", ")", ":", "# check inputs", "if", "not", "hasattr", "(", "g", ",", "'count_het'", ")", "or", "not", "hasattr", "(", "g", ",", "'count_called'", ")", ":", "g", "=", "GenotypeArray", "(", "g", ",", "copy", "=", "False", ")", "# calculate observed and expected heterozygosity", "ho", "=", "heterozygosity_observed", "(", "g", ")", "af", "=", "g", ".", "count_alleles", "(", ")", ".", "to_frequencies", "(", ")", "he", "=", "heterozygosity_expected", "(", "af", ",", "ploidy", "=", "g", ".", "shape", "[", "-", "1", "]", ",", "fill", "=", "0", ")", "# calculate inbreeding coefficient, accounting for variants with no", "# expected heterozygosity", "with", "ignore_invalid", "(", ")", ":", "f", "=", "np", ".", "where", "(", "he", ">", "0", ",", "1", "-", "(", "ho", "/", "he", ")", ",", "fill", ")", "return", "f" ]
Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333])
[ "Calculate", "the", "inbreeding", "coefficient", "for", "each", "variant", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L106-L157
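A hand-check of 1 - (Ho/He) against the docstring example above: for the second variant, Ho = 1/3 (one het among three called samples) and He = 0.5, so F = 1 - (1/3)/0.5 = 1/3 ≈ 0.33333333; for the first variant He = 0, so the fill value (nan) is returned. Both agree with the printed array.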
992
cggh/scikit-allel
allel/stats/mendel.py
mendel_errors
def mendel_errors(parent_genotypes, progeny_genotypes): """Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) """ # setup parent_genotypes = GenotypeArray(parent_genotypes) progeny_genotypes = GenotypeArray(progeny_genotypes) check_ploidy(parent_genotypes.ploidy, 2) check_ploidy(progeny_genotypes.ploidy, 2) # transform into per-call allele counts max_allele = max(parent_genotypes.max(), progeny_genotypes.max()) parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1') # detect nonparental and hemiparental inheritance by comparing allele # counts between parents and progeny max_progeny_gc = parent_gc.clip(max=1).sum(axis=1) max_progeny_gc = max_progeny_gc[:, np.newaxis, :] me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2) # detect uniparental inheritance by finding cases where no alleles are # shared between parents, then comparing progeny allele counts to each # parent p1_gc = parent_gc[:, 0, np.newaxis, :] p2_gc = parent_gc[:, 1, np.newaxis, :] # find variants where parents don't share any alleles is_shared_allele = (p1_gc > 0) & (p2_gc > 0) no_shared_alleles = ~np.any(is_shared_allele, axis=2) # find calls where progeny genotype is identical to one or the other parent me[no_shared_alleles & (np.all(progeny_gc == p1_gc, axis=2) | np.all(progeny_gc == p2_gc, axis=2))] = 1 # retrofit where either or both parent has a missing call me[np.any(parent_genotypes.is_missing(), axis=1)] = 0 return me
python
def mendel_errors(parent_genotypes, progeny_genotypes):
    # setup
    parent_genotypes = GenotypeArray(parent_genotypes)
    progeny_genotypes = GenotypeArray(progeny_genotypes)
    check_ploidy(parent_genotypes.ploidy, 2)
    check_ploidy(progeny_genotypes.ploidy, 2)

    # transform into per-call allele counts
    max_allele = max(parent_genotypes.max(), progeny_genotypes.max())
    parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele,
                                                  dtype='i1')
    progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele,
                                                    dtype='i1')

    # detect nonparental and hemiparental inheritance by comparing allele
    # counts between parents and progeny
    max_progeny_gc = parent_gc.clip(max=1).sum(axis=1)
    max_progeny_gc = max_progeny_gc[:, np.newaxis, :]
    me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2)

    # detect uniparental inheritance by finding cases where no alleles are
    # shared between parents, then comparing progeny allele counts to each
    # parent
    p1_gc = parent_gc[:, 0, np.newaxis, :]
    p2_gc = parent_gc[:, 1, np.newaxis, :]

    # find variants where parents don't share any alleles
    is_shared_allele = (p1_gc > 0) & (p2_gc > 0)
    no_shared_alleles = ~np.any(is_shared_allele, axis=2)

    # find calls where progeny genotype is identical to one or the other
    # parent
    me[no_shared_alleles &
       (np.all(progeny_gc == p1_gc, axis=2) |
        np.all(progeny_gc == p2_gc, axis=2))] = 1

    # retrofit where one or both parents have a missing call
    me[np.any(parent_genotypes.is_missing(), axis=1)] = 0

    return me
[ "def", "mendel_errors", "(", "parent_genotypes", ",", "progeny_genotypes", ")", ":", "# setup", "parent_genotypes", "=", "GenotypeArray", "(", "parent_genotypes", ")", "progeny_genotypes", "=", "GenotypeArray", "(", "progeny_genotypes", ")", "check_ploidy", "(", "parent_genotypes", ".", "ploidy", ",", "2", ")", "check_ploidy", "(", "progeny_genotypes", ".", "ploidy", ",", "2", ")", "# transform into per-call allele counts", "max_allele", "=", "max", "(", "parent_genotypes", ".", "max", "(", ")", ",", "progeny_genotypes", ".", "max", "(", ")", ")", "parent_gc", "=", "parent_genotypes", ".", "to_allele_counts", "(", "max_allele", "=", "max_allele", ",", "dtype", "=", "'i1'", ")", "progeny_gc", "=", "progeny_genotypes", ".", "to_allele_counts", "(", "max_allele", "=", "max_allele", ",", "dtype", "=", "'i1'", ")", "# detect nonparental and hemiparental inheritance by comparing allele", "# counts between parents and progeny", "max_progeny_gc", "=", "parent_gc", ".", "clip", "(", "max", "=", "1", ")", ".", "sum", "(", "axis", "=", "1", ")", "max_progeny_gc", "=", "max_progeny_gc", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", "me", "=", "(", "progeny_gc", "-", "max_progeny_gc", ")", ".", "clip", "(", "min", "=", "0", ")", ".", "sum", "(", "axis", "=", "2", ")", "# detect uniparental inheritance by finding cases where no alleles are", "# shared between parents, then comparing progeny allele counts to each", "# parent", "p1_gc", "=", "parent_gc", "[", ":", ",", "0", ",", "np", ".", "newaxis", ",", ":", "]", "p2_gc", "=", "parent_gc", "[", ":", ",", "1", ",", "np", ".", "newaxis", ",", ":", "]", "# find variants where parents don't share any alleles", "is_shared_allele", "=", "(", "p1_gc", ">", "0", ")", "&", "(", "p2_gc", ">", "0", ")", "no_shared_alleles", "=", "~", "np", ".", "any", "(", "is_shared_allele", ",", "axis", "=", "2", ")", "# find calls where progeny genotype is identical to one or the other parent", "me", "[", "no_shared_alleles", "&", "(", "np", ".", "all", "(", "progeny_gc", "==", "p1_gc", ",", "axis", "=", "2", ")", "|", "np", ".", "all", "(", "progeny_gc", "==", "p2_gc", ",", "axis", "=", "2", ")", ")", "]", "=", "1", "# retrofit where either or both parent has a missing call", "me", "[", "np", ".", "any", "(", "parent_genotypes", ".", "is_missing", "(", ")", ",", "axis", "=", "1", ")", "]", "=", "0", "return", "me" ]
Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls for the progeny. Returns ------- me : ndarray, int, shape (n_variants, n_progeny) Count of Mendel errors for each progeny genotype call. Examples -------- The following are all consistent with Mendelian transmission. Note that a value of 0 is returned for missing calls:: >>> import allel >>> import numpy as np >>> genotypes = np.array([ ... # aa x aa -> aa ... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x ab -> aa or ab ... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]], ... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]], ... # aa x bb -> ab ... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]], ... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]], ... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]], ... # aa x bc -> ab or ac ... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]], ... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]], ... # ab x ab -> aa or ab or bb ... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]], ... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]], ... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]], ... # ab x bc -> ab or ac or bb or bc ... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]], ... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]], ... # ab x cd -> ac or ad or bc or bd ... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) The following are cases of 'non-parental' inheritance where one or two alleles are found in the progeny that are not present in either parent. Note that the number of errors may be 1 or 2 depending on the number of non-parental alleles:: >>> genotypes = np.array([ ... # aa x aa -> ab or ac or bb or cc ... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]], ... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]], ... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]], ... # aa x ab -> ac or bc or cc ... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]], ... # aa x bb -> ac or bc or cc ... [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x ab -> ac or bc or cc ... [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]], ... [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]], ... [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]], ... # ab x bc -> ad or bd or cd or dd ... [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]], ... # ab x cd -> ae or be or ce or de ... [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]], ... 
]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 2], [1, 1, 1, 1]]) The following are cases of 'hemi-parental' inheritance, where progeny appear to have inherited two copies of an allele found only once in one of the parents:: >>> genotypes = np.array([ ... # aa x ab -> bb ... [[0, 0], [0, 1], [1, 1], [-1, -1]], ... [[0, 0], [0, 2], [2, 2], [-1, -1]], ... [[1, 1], [0, 1], [0, 0], [-1, -1]], ... # ab x bc -> aa or cc ... [[0, 1], [1, 2], [0, 0], [2, 2]], ... [[0, 1], [0, 2], [1, 1], [2, 2]], ... [[0, 2], [1, 2], [0, 0], [1, 1]], ... # ab x cd -> aa or bb or cc or dd ... [[0, 1], [2, 3], [0, 0], [1, 1]], ... [[0, 1], [2, 3], [2, 2], [3, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 0], [1, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]]) The following are cases of 'uni-parental' inheritance, where progeny appear to have inherited both alleles from a single parent:: >>> genotypes = np.array([ ... # aa x bb -> aa or bb ... [[0, 0], [1, 1], [0, 0], [1, 1]], ... [[0, 0], [2, 2], [0, 0], [2, 2]], ... [[1, 1], [2, 2], [1, 1], [2, 2]], ... # aa x bc -> aa or bc ... [[0, 0], [1, 2], [0, 0], [1, 2]], ... [[1, 1], [0, 2], [1, 1], [0, 2]], ... # ab x cd -> ab or cd ... [[0, 1], [2, 3], [0, 1], [2, 3]], ... ]) >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:]) >>> me array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]])
[ "Locate", "genotype", "calls", "not", "consistent", "with", "Mendelian", "transmission", "of", "alleles", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L15-L218
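A reduced hand-check of the non-parental counting above, for the aa x aa -> bb case that scores 2 errors in the docstring example. The arrays here collapse the variants dimension, so the axis arguments differ from the record's.

import numpy as np

parent_gc = np.array([[2, 0],    # parent 1: two copies of allele 0
                      [2, 0]])   # parent 2: likewise
max_progeny_gc = parent_gc.clip(max=1).sum(axis=0)    # -> [2, 0]
progeny_gc = np.array([0, 2])    # progeny homozygous for allele 1
me = (progeny_gc - max_progeny_gc).clip(min=0).sum()  # -> 2 Mendel errors
print(me)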
993
cggh/scikit-allel
allel/stats/mendel.py
paint_transmission
def paint_transmission(parent_haplotypes, progeny_haplotypes): """Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8) """ # check inputs parent_haplotypes = HaplotypeArray(parent_haplotypes) progeny_haplotypes = HaplotypeArray(progeny_haplotypes) if parent_haplotypes.n_haplotypes != 2: raise ValueError('exactly two parental haplotypes should be provided') # convenience variables parent1 = parent_haplotypes[:, 0, np.newaxis] parent2 = parent_haplotypes[:, 1, np.newaxis] progeny_is_missing = progeny_haplotypes < 0 parent_is_missing = np.any(parent_haplotypes < 0, axis=1) # need this for broadcasting, but also need to retain original for later parent_is_missing_bc = parent_is_missing[:, np.newaxis] parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :]) parent_is_hom_ref = parent_diplotype.is_hom_ref() parent_is_het = parent_diplotype.is_het() parent_is_hom_alt = parent_diplotype.is_hom_alt() # identify allele calls where inheritance can be determined is_callable = ~progeny_is_missing & ~parent_is_missing_bc is_callable_seg = is_callable & parent_is_het # main inheritance states inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1) inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2) nonseg_ref = (is_callable & parent_is_hom_ref & (progeny_haplotypes == parent1)) nonseg_alt = (is_callable & parent_is_hom_alt & (progeny_haplotypes == parent1)) nonparental = ( is_callable & (progeny_haplotypes != parent1) & (progeny_haplotypes != parent2) ) # record inheritance states # N.B., order in which these are set matters painting = np.zeros(progeny_haplotypes.shape, dtype='u1') painting[inherit_parent1] = INHERIT_PARENT1 painting[inherit_parent2] = INHERIT_PARENT2 painting[nonseg_ref] = INHERIT_NONSEG_REF painting[nonseg_alt] = INHERIT_NONSEG_ALT painting[nonparental] = INHERIT_NONPARENTAL painting[parent_is_missing] = INHERIT_PARENT_MISSING painting[progeny_is_missing] = INHERIT_MISSING return painting
python
def paint_transmission(parent_haplotypes, progeny_haplotypes):
    # check inputs
    parent_haplotypes = HaplotypeArray(parent_haplotypes)
    progeny_haplotypes = HaplotypeArray(progeny_haplotypes)
    if parent_haplotypes.n_haplotypes != 2:
        raise ValueError('exactly two parental haplotypes should be provided')

    # convenience variables
    parent1 = parent_haplotypes[:, 0, np.newaxis]
    parent2 = parent_haplotypes[:, 1, np.newaxis]
    progeny_is_missing = progeny_haplotypes < 0
    parent_is_missing = np.any(parent_haplotypes < 0, axis=1)
    # need this for broadcasting, but also need to retain original for later
    parent_is_missing_bc = parent_is_missing[:, np.newaxis]
    parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :])
    parent_is_hom_ref = parent_diplotype.is_hom_ref()
    parent_is_het = parent_diplotype.is_het()
    parent_is_hom_alt = parent_diplotype.is_hom_alt()

    # identify allele calls where inheritance can be determined
    is_callable = ~progeny_is_missing & ~parent_is_missing_bc
    is_callable_seg = is_callable & parent_is_het

    # main inheritance states
    inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1)
    inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2)
    nonseg_ref = (is_callable & parent_is_hom_ref &
                  (progeny_haplotypes == parent1))
    nonseg_alt = (is_callable & parent_is_hom_alt &
                  (progeny_haplotypes == parent1))
    nonparental = (
        is_callable &
        (progeny_haplotypes != parent1) &
        (progeny_haplotypes != parent2)
    )

    # record inheritance states
    # N.B., order in which these are set matters
    painting = np.zeros(progeny_haplotypes.shape, dtype='u1')
    painting[inherit_parent1] = INHERIT_PARENT1
    painting[inherit_parent2] = INHERIT_PARENT2
    painting[nonseg_ref] = INHERIT_NONSEG_REF
    painting[nonseg_alt] = INHERIT_NONSEG_ALT
    painting[nonparental] = INHERIT_NONPARENTAL
    painting[parent_is_missing] = INHERIT_PARENT_MISSING
    painting[progeny_is_missing] = INHERIT_MISSING

    return painting
[ "def", "paint_transmission", "(", "parent_haplotypes", ",", "progeny_haplotypes", ")", ":", "# check inputs", "parent_haplotypes", "=", "HaplotypeArray", "(", "parent_haplotypes", ")", "progeny_haplotypes", "=", "HaplotypeArray", "(", "progeny_haplotypes", ")", "if", "parent_haplotypes", ".", "n_haplotypes", "!=", "2", ":", "raise", "ValueError", "(", "'exactly two parental haplotypes should be provided'", ")", "# convenience variables", "parent1", "=", "parent_haplotypes", "[", ":", ",", "0", ",", "np", ".", "newaxis", "]", "parent2", "=", "parent_haplotypes", "[", ":", ",", "1", ",", "np", ".", "newaxis", "]", "progeny_is_missing", "=", "progeny_haplotypes", "<", "0", "parent_is_missing", "=", "np", ".", "any", "(", "parent_haplotypes", "<", "0", ",", "axis", "=", "1", ")", "# need this for broadcasting, but also need to retain original for later", "parent_is_missing_bc", "=", "parent_is_missing", "[", ":", ",", "np", ".", "newaxis", "]", "parent_diplotype", "=", "GenotypeArray", "(", "parent_haplotypes", "[", ":", ",", "np", ".", "newaxis", ",", ":", "]", ")", "parent_is_hom_ref", "=", "parent_diplotype", ".", "is_hom_ref", "(", ")", "parent_is_het", "=", "parent_diplotype", ".", "is_het", "(", ")", "parent_is_hom_alt", "=", "parent_diplotype", ".", "is_hom_alt", "(", ")", "# identify allele calls where inheritance can be determined", "is_callable", "=", "~", "progeny_is_missing", "&", "~", "parent_is_missing_bc", "is_callable_seg", "=", "is_callable", "&", "parent_is_het", "# main inheritance states", "inherit_parent1", "=", "is_callable_seg", "&", "(", "progeny_haplotypes", "==", "parent1", ")", "inherit_parent2", "=", "is_callable_seg", "&", "(", "progeny_haplotypes", "==", "parent2", ")", "nonseg_ref", "=", "(", "is_callable", "&", "parent_is_hom_ref", "&", "(", "progeny_haplotypes", "==", "parent1", ")", ")", "nonseg_alt", "=", "(", "is_callable", "&", "parent_is_hom_alt", "&", "(", "progeny_haplotypes", "==", "parent1", ")", ")", "nonparental", "=", "(", "is_callable", "&", "(", "progeny_haplotypes", "!=", "parent1", ")", "&", "(", "progeny_haplotypes", "!=", "parent2", ")", ")", "# record inheritance states", "# N.B., order in which these are set matters", "painting", "=", "np", ".", "zeros", "(", "progeny_haplotypes", ".", "shape", ",", "dtype", "=", "'u1'", ")", "painting", "[", "inherit_parent1", "]", "=", "INHERIT_PARENT1", "painting", "[", "inherit_parent2", "]", "=", "INHERIT_PARENT2", "painting", "[", "nonseg_ref", "]", "=", "INHERIT_NONSEG_REF", "painting", "[", "nonseg_alt", "]", "=", "INHERIT_NONSEG_ALT", "painting", "[", "nonparental", "]", "=", "INHERIT_NONPARENTAL", "painting", "[", "parent_is_missing", "]", "=", "INHERIT_PARENT_MISSING", "painting", "[", "progeny_is_missing", "]", "=", "INHERIT_MISSING", "return", "painting" ]
Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) Haplotypes found in progeny of the given parent, inherited from the given parent. I.e., haplotypes from gametes of the given parent. Returns ------- painting : ndarray, uint8, shape (n_variants, n_progeny) An array of integers coded as follows: 1 = allele inherited from first parental haplotype; 2 = allele inherited from second parental haplotype; 3 = reference allele, also carried by both parental haplotypes; 4 = non-reference allele, also carried by both parental haplotypes; 5 = non-parental allele; 6 = either or both parental alleles missing; 7 = missing allele; 0 = undetermined. Examples -------- >>> import allel >>> haplotypes = allel.HaplotypeArray([ ... [0, 0, 0, 1, 2, -1], ... [0, 1, 0, 1, 2, -1], ... [1, 0, 0, 1, 2, -1], ... [1, 1, 0, 1, 2, -1], ... [0, 2, 0, 1, 2, -1], ... [0, -1, 0, 1, 2, -1], ... [-1, 1, 0, 1, 2, -1], ... [-1, -1, 0, 1, 2, -1], ... ], dtype='i1') >>> painting = allel.paint_transmission(haplotypes[:, :2], ... haplotypes[:, 2:]) >>> painting array([[3, 5, 5, 7], [1, 2, 5, 7], [2, 1, 5, 7], [5, 4, 5, 7], [1, 5, 2, 7], [6, 6, 6, 7], [6, 6, 6, 7], [6, 6, 6, 7]], dtype=uint8)
[ "Paint", "haplotypes", "inherited", "from", "a", "single", "diploid", "parent", "according", "to", "their", "allelic", "inheritance", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L232-L323
994
cggh/scikit-allel
allel/stats/mendel.py
phase_progeny_by_transmission
def phase_progeny_by_transmission(g): """Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]]) """ # setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # outputs return g
python
def phase_progeny_by_transmission(g):
    # setup
    g = GenotypeArray(g, dtype='i1', copy=True)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)

    # run the phasing
    # N.B., a copy has already been made, so no need to make memoryview safe
    is_phased = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(is_phased).view(bool)

    # outputs
    return g
[ "def", "phase_progeny_by_transmission", "(", "g", ")", ":", "# setup", "g", "=", "GenotypeArray", "(", "g", ",", "dtype", "=", "'i1'", ",", "copy", "=", "True", ")", "check_ploidy", "(", "g", ".", "ploidy", ",", "2", ")", "check_min_samples", "(", "g", ".", "n_samples", ",", "3", ")", "# run the phasing", "# N.B., a copy has already been made, so no need to make memoryview safe", "is_phased", "=", "_opt_phase_progeny_by_transmission", "(", "g", ".", "values", ")", "g", ".", "is_phased", "=", "np", ".", "asarray", "(", "is_phased", ")", ".", "view", "(", "bool", ")", "# outputs", "return", "g" ]
Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]])
[ "Phase", "progeny", "genotypes", "from", "a", "trio", "or", "cross", "using", "Mendelian", "transmission", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L326-L405
995
cggh/scikit-allel
allel/stats/mendel.py
phase_parents_by_transmission
def phase_parents_by_transmission(g, window_size): """Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible. """ # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g
python
def phase_parents_by_transmission(g, window_size): # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g
[ "def", "phase_parents_by_transmission", "(", "g", ",", "window_size", ")", ":", "# setup", "check_type", "(", "g", ",", "GenotypeArray", ")", "check_dtype", "(", "g", ".", "values", ",", "'i1'", ")", "check_ploidy", "(", "g", ".", "ploidy", ",", "2", ")", "if", "g", ".", "is_phased", "is", "None", ":", "raise", "ValueError", "(", "'genotype array must first have progeny phased by transmission'", ")", "check_min_samples", "(", "g", ".", "n_samples", ",", "3", ")", "# run the phasing", "g", ".", "_values", "=", "memoryview_safe", "(", "g", ".", "values", ")", "g", ".", "_is_phased", "=", "memoryview_safe", "(", "g", ".", "is_phased", ")", "_opt_phase_parents_by_transmission", "(", "g", ".", "values", ",", "g", ".", "is_phased", ".", "view", "(", "'u1'", ")", ",", "window_size", ")", "# outputs", "return", "g" ]
Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible.
[ "Phase", "parent", "genotypes", "from", "a", "trio", "or", "cross", "given", "progeny", "genotypes", "already", "phased", "by", "Mendelian", "transmission", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L408-L443
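A hedged sketch of chaining the two phasing steps: phase_parents_by_transmission raises ValueError unless is_phased has already been set by the progeny pass, so the calls must run in this order. The window_size of 10 is illustrative only, taken from the low end of the 10-100 range the docstring suggests.

```python
import allel

g = allel.GenotypeArray([
    [[0, 0], [1, 1], [0, 1]],
    [[0, 1], [0, 0], [0, 1]],
    [[1, 1], [0, 1], [1, 1]],
], dtype='i1')

g = allel.phase_progeny_by_transmission(g)                  # sets g.is_phased
g = allel.phase_parents_by_transmission(g, window_size=10)  # then phase parents
print(g.is_phased)
```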
996
cggh/scikit-allel
allel/stats/mendel.py
phase_by_transmission
def phase_by_transmission(g, window_size, copy=True): """Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible. """ # setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # phase the parents _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
python
def phase_by_transmission(g, window_size, copy=True): # setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # phase the parents _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
[ "def", "phase_by_transmission", "(", "g", ",", "window_size", ",", "copy", "=", "True", ")", ":", "# setup", "g", "=", "np", ".", "asarray", "(", "g", ",", "dtype", "=", "'i1'", ")", "g", "=", "GenotypeArray", "(", "g", ",", "copy", "=", "copy", ")", "g", ".", "_values", "=", "memoryview_safe", "(", "g", ".", "values", ")", "check_ploidy", "(", "g", ".", "ploidy", ",", "2", ")", "check_min_samples", "(", "g", ".", "n_samples", ",", "3", ")", "# phase the progeny", "is_phased", "=", "_opt_phase_progeny_by_transmission", "(", "g", ".", "values", ")", "g", ".", "is_phased", "=", "np", ".", "asarray", "(", "is_phased", ")", ".", "view", "(", "bool", ")", "# phase the parents", "_opt_phase_parents_by_transmission", "(", "g", ".", "values", ",", "is_phased", ",", "window_size", ")", "return", "g" ]
Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible.
[ "Phase", "genotypes", "in", "a", "trio", "or", "cross", "where", "possible", "using", "Mendelian", "transmission", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/mendel.py#L446-L485
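The wrapper above simply runs the progeny pass and the parent pass back to back; a short sketch follows, with the in-place caveat from the docstring: copy=False only avoids a copy when the input is already int8, as it is here.

```python
import allel
import numpy as np

raw = np.array([
    [[0, 0], [1, 1], [0, 1]],
    [[0, 1], [0, 0], [0, 0]],
], dtype='i1')

g = allel.phase_by_transmission(raw, window_size=10, copy=False)
print(g.to_str())
print(g.is_phased)
```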
997
cggh/scikit-allel
allel/chunked/util.py
get_blen_array
def get_blen_array(data, blen=None): """Try to guess a reasonable block length to use for block-wise iteration over `data`.""" if blen is None: if hasattr(data, 'chunklen'): # bcolz carray return data.chunklen elif hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ hasattr(data.shape, '__len__') and \ len(data.chunks) == len(data.shape): # something like h5py dataset return data.chunks[0] else: # fall back to something simple, ~1 MiB chunks row = np.asarray(data[0]) return max(1, (2**20) // row.nbytes) else: return blen
python
def get_blen_array(data, blen=None): if blen is None: if hasattr(data, 'chunklen'): # bcolz carray return data.chunklen elif hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ hasattr(data.shape, '__len__') and \ len(data.chunks) == len(data.shape): # something like h5py dataset return data.chunks[0] else: # fall back to something simple, ~1 MiB chunks row = np.asarray(data[0]) return max(1, (2**20) // row.nbytes) else: return blen
[ "def", "get_blen_array", "(", "data", ",", "blen", "=", "None", ")", ":", "if", "blen", "is", "None", ":", "if", "hasattr", "(", "data", ",", "'chunklen'", ")", ":", "# bcolz carray", "return", "data", ".", "chunklen", "elif", "hasattr", "(", "data", ",", "'chunks'", ")", "and", "hasattr", "(", "data", ",", "'shape'", ")", "and", "hasattr", "(", "data", ".", "chunks", ",", "'__len__'", ")", "and", "hasattr", "(", "data", ".", "shape", ",", "'__len__'", ")", "and", "len", "(", "data", ".", "chunks", ")", "==", "len", "(", "data", ".", "shape", ")", ":", "# something like h5py dataset", "return", "data", ".", "chunks", "[", "0", "]", "else", ":", "# fall back to something simple, ~1Mb chunks", "row", "=", "np", ".", "asarray", "(", "data", "[", "0", "]", ")", "return", "max", "(", "1", ",", "(", "2", "**", "20", ")", "//", "row", ".", "nbytes", ")", "else", ":", "return", "blen" ]
Try to guess a reasonable block length to use for block-wise iteration over `data`.
[ "Try", "to", "guess", "a", "reasonable", "block", "length", "to", "use", "for", "block", "-", "wise", "iteration", "over", "data", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/util.py#L96-L120
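A small sketch of the fallback branch of get_blen_array, assuming a plain numpy array (which has neither a bcolz-style chunklen nor h5py-style chunks), so the ~1 MiB heuristic applies; an explicit blen always wins.

```python
import numpy as np
from allel.chunked.util import get_blen_array

data = np.zeros((10000, 100), dtype='i1')  # each row occupies 100 bytes
print(get_blen_array(data))                # 2**20 // 100 == 10485
print(get_blen_array(data, blen=500))      # explicit blen is returned: 500
```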
998
cggh/scikit-allel
allel/chunked/storage_hdf5.py
h5fmem
def h5fmem(**kwargs): """Create an in-memory HDF5 file.""" # need a file name even though nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
python
def h5fmem(**kwargs): # need a file name even though nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
[ "def", "h5fmem", "(", "*", "*", "kwargs", ")", ":", "# need a file name even tho nothing is ever written", "fn", "=", "tempfile", ".", "mktemp", "(", ")", "# file creation args", "kwargs", "[", "'mode'", "]", "=", "'w'", "kwargs", "[", "'driver'", "]", "=", "'core'", "kwargs", "[", "'backing_store'", "]", "=", "False", "# open HDF5 file", "h5f", "=", "h5py", ".", "File", "(", "fn", ",", "*", "*", "kwargs", ")", "return", "h5f" ]
Create an in-memory HDF5 file.
[ "Create", "an", "in", "-", "memory", "HDF5", "file", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/storage_hdf5.py#L17-L31
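A brief sketch of the in-memory helper: with driver='core' and backing_store=False, h5py never writes to disk, so the mktemp name is only a label. Requires h5py to be installed; the dataset name below is arbitrary.

```python
from allel.chunked.storage_hdf5 import h5fmem

h5f = h5fmem()
h5f.create_dataset('calldata/GT', data=[[0, 1], [1, 1]])  # groups created implicitly
print(h5f['calldata/GT'][:])
h5f.close()
```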
999
cggh/scikit-allel
allel/chunked/storage_hdf5.py
h5ftmp
def h5ftmp(**kwargs): """Create an HDF5 file backed by a temporary file.""" # create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args kwargs['mode'] = 'w' # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
python
def h5ftmp(**kwargs): # create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args kwargs['mode'] = 'w' # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
[ "def", "h5ftmp", "(", "*", "*", "kwargs", ")", ":", "# create temporary file name", "suffix", "=", "kwargs", ".", "pop", "(", "'suffix'", ",", "'.h5'", ")", "prefix", "=", "kwargs", ".", "pop", "(", "'prefix'", ",", "'scikit_allel_'", ")", "tempdir", "=", "kwargs", ".", "pop", "(", "'dir'", ",", "None", ")", "fn", "=", "tempfile", ".", "mktemp", "(", "suffix", "=", "suffix", ",", "prefix", "=", "prefix", ",", "dir", "=", "tempdir", ")", "atexit", ".", "register", "(", "os", ".", "remove", ",", "fn", ")", "# file creation args", "kwargs", "[", "'mode'", "]", "=", "'w'", "# open HDF5 file", "h5f", "=", "h5py", ".", "File", "(", "fn", ",", "*", "*", "kwargs", ")", "return", "h5f" ]
Create an HDF5 file backed by a temporary file.
[ "Create", "an", "HDF5", "file", "backed", "by", "a", "temporary", "file", "." ]
3c979a57a100240ba959dd13f98839349530f215
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/chunked/storage_hdf5.py#L34-L50
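And a matching sketch for the temp-file-backed variant: the file really exists on disk under a scikit_allel_*.h5 name (or a custom prefix, as here) but is scheduled for removal at interpreter exit via atexit. The suffix/prefix/dir keywords are the only ones the helper consumes before handing the rest to h5py.File.

```python
from allel.chunked.storage_hdf5 import h5ftmp

h5f = h5ftmp(prefix='example_')
print(h5f.filename)  # e.g. /tmp/example_xxxxxxxx.h5
h5f.create_dataset('variants/POS', data=[1, 5, 12])
h5f.close()          # underlying file is removed later, at exit
```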