# test_set_one_ca_list: if passed a list containing a single X509Name,
# `Context.set_client_ca_list` configures the context to send that CA name to
# the client and, on both the server and client sides,
# `Connection.get_client_ca_list` returns a list containing that X509Name
# after the connection is set up.
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
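# Return a TCP socket in whichever address family this host supports,
# preferring IPv4 and falling back to IPv6 when AF_INET is unavailable.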
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
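# Return the loopback address appropriate for the given socket's family.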
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
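# A pass-through verification callback: accept or reject the peer certificate
# exactly as OpenSSL's built-in verification already decided (the ``ok`` flag).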
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
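# Drive ``do_handshake`` on both connections until each side has completed the
# TLS handshake, treating ``WantReadError`` as "not done yet, try again".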
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
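# Default factories used by ``loopback`` below: wrap an already-connected
# socket in a client-side or server-side ``Connection``.  The server factory
# also loads the test server key and certificate.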
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair, wrap each end in a server-side and a
    client-side SSL `Connection`, and complete the TLS handshake between them
    before returning `(server, client)`.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
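# A minimal usage sketch for ``loopback`` above (hypothetical payload): the two
# returned ``Connection`` objects wrap real, connected sockets and have already
# completed the handshake, so data written on one side is readable on the
# other:
#
#     server, client = loopback()
#     client.send(b"hello")
#     assert server.recv(5) == b"hello"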
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
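# A minimal usage sketch for the in-memory helpers above (assuming a
# ``server_context`` already configured with a key and certificate and a plain
# client ``Context``): passing ``None`` as the socket makes a ``Connection``
# use memory BIOs, ``handshake_in_memory`` completes the handshake, and
# ``interact_in_memory`` shuttles bytes until one side delivers application
# data.
#
#     server = Connection(server_context, None)
#     client = Connection(client_context, None)
#     handshake_in_memory(client, server)
#     client.send(b"ping")
#     assert interact_in_memory(client, server) == (server, b"ping")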
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when its argument is not
        an `OpenSSL.crypto.X509` instance.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
        (i.e., one which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
        `Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
        Test that we load certificates successfully on Linux from the fallback
        path. To do this we set _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
        _CRYPTOGRAPHY_MANYLINUX1_CA_DIR to whatever the current OpenSSL
        defaults are, and we stub out SSL_CTX_set_default_verify_paths so that
        certificates can only be found via the fallback path.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that no error is raised when passed empty lists or paths that do
        not exist.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
        We had a bug where the X509 cert instantiated in the callback wrapper
        was never properly initialized (its __init__ was not called), so it was
        missing attributes needed when calling get_subject. This test sets up a
        handshake where we call get_subject on the cert provided to the verify
        callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
        `Context.use_certificate_chain_file` raises `TypeError` if its single
        argument is not a byte or unicode string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
        `Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
        a new callback, the one it replaces is no longer referenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
        callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol to
        use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
            # The server selects no protocol, so the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
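# Many tests below use the module's ``loopback()`` helper (defined earlier in
# this file) to obtain an already-handshaked server/client pair over a real
# socket pair.  The function below is only a hedged, illustrative sketch of
# that idea and is not used by the tests; certificate handling and error
# handling are reduced to a bare minimum.
def _loopback_sketch():  # pragma: nocover
    from OpenSSL.SSL import (
        Context, Connection, TLSv1_METHOD, WantReadError, WantWriteError)
    from OpenSSL.crypto import FILETYPE_PEM, load_certificate, load_privatekey

    server_sock, client_sock = socket_pair()
    server_sock.setblocking(False)
    client_sock.setblocking(False)

    server_ctx = Context(TLSv1_METHOD)
    server_ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
    server_ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
    server = Connection(server_ctx, server_sock)
    server.set_accept_state()

    client = Connection(Context(TLSv1_METHOD), client_sock)
    client.set_connect_state()

    # Alternate handshake attempts until both sides finish.  WantReadError
    # and WantWriteError just mean "try again once the peer has moved".
    pending = [client, server]
    while pending:
        for conn in list(pending):
            try:
                conn.do_handshake()
            except (WantReadError, WantWriteError):
                pass
            else:
                pending.remove(conn)
    return server, client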
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
        `Connection.get_state_string` verbosely describes the current state
        of the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
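        # Sessions are only resumed within a matching session ID context, so
        # give the server one to cache and recognise its sessions under.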
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
        `Connection` methods which generate output raise
        `OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
        # signal a short write via its return value, it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for
        # the very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent
        by the client or server. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
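        # The attempted handshake queued a ClientHello in the outgoing memory
        # BIO, so bio_read has bytes to return - but no more than requested.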
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
        `Connection.get_cipher_list` returns a list of `str` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
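# VeryLarge only has to lie about its length: the test below relies on the
# length check in Connection.send rejecting the buffer before any data is
# actually handed to OpenSSL.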
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with arguments other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
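        # The bytes that did not fit stay buffered and can be read later.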
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
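        """
        `Connection.recv_into` can be passed `socket.MSG_PEEK` as `flags` to
        peek at the incoming data without consuming it.
        """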
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
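        # renegotiate() only schedules the renegotiation; switch to
        # non-blocking mode and run the handshake on both sides to carry it
        # out without hanging on the peer.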
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can be made about them, it seems,
    concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
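# The memory-BIO tests below shuttle all TLS bytes between the two sides by
# hand, via the ``interact_in_memory`` helper defined earlier in this module.
# The function below is only a hedged, illustrative sketch of that idea and
# is not used by the tests: let each side try to make progress, then copy
# whatever TLS records it produced over to its peer, until nothing moves.
def _interact_in_memory_sketch(conn_a, conn_b):  # pragma: nocover
    from OpenSSL.SSL import WantReadError

    while True:
        wrote = False
        for read_conn, write_conn in [(conn_a, conn_b), (conn_b, conn_a)]:
            # Let this side advance its state machine; WantReadError only
            # means it is still waiting for bytes from its peer.
            try:
                data = read_conn.recv(2 ** 16)
            except WantReadError:
                pass
            else:
                # Application data arrived - report who received it and what.
                return (read_conn, data)
            # Copy any TLS records this side queued up into the peer's BIO.
            try:
                pending = read_conn.bio_read(2 ** 16)
            except WantReadError:
                pending = b''
            if pending:
                write_conn.bio_write(pending)
                wrote = True
        if not wrote:
            # Neither side produced anything new; the exchange is complete.
            return None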
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
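        # interact_in_memory returns None here: only handshake records were
        # exchanged, neither side produced application data.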
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
# MASKED: test_set_one_ca_list function (lines 3514-3528)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
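    # The stapling flow exercised below: the client calls request_ocsp() and
    # registers a callback that accepts or rejects the stapled response; the
    # server's callback supplies the raw response bytes (sample_ocsp_data
    # stands in for a real DER-encoded OCSP response).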
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
        The server callback must return a bytestring, or a `TypeError` is
        raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
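# Default verification callback used by several tests: simply pass OpenSSL's
# preverification result back unchanged.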
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
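# Wrap an already-connected socket in a client-mode Connection using the
# requested TLS method.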
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
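# Wrap an already-connected socket in a server-mode Connection configured
# with the test server key and certificate.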
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair, wrap each end in an SSL `Connection`,
    and perform the TLS handshake between them.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
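    # Shuttle the handshake records between the two memory BIOs until neither
    # side has anything left to send.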
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when passed anything
        other than an `OpenSSL.crypto.X509` instance as its argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
        `Context.use_certificate` raises `OpenSSL.SSL.Error` when passed an
        `OpenSSL.crypto.X509` instance which has not been initialized
        (i.e., which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
        `Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
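        # OpenSSL's PEM passphrase buffer (PEM_BUFSIZE) is 1024 bytes, so the
        # callback is invoked with maxlen == 1024.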
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
        Verify that if a path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
        Verify that if a path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
        SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
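        # If the fallback loading worked, the context's certificate store now
        # holds at least one X509 object.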
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that when passed empty lists or paths that do not exist, no
        errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
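        # A few alternating do_handshake() passes over the connected socket
        # pair are enough to complete the TLS handshake; WantReadError just
        # means that side is still waiting for data from its peer.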
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
        We had a bug where the X509 cert instantiated in the callback wrapper
        was never run through __init__, so it was missing attributes needed
        when calling get_subject. This test sets up a handshake and calls
        get_subject on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
        `Context.use_certificate_chain_file` raises `TypeError` if its single
        argument is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
        callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next,
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# The server's select callback returns an empty string, so no protocol is
# agreed on and the connection fails.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
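# Truncate the connection: signal EOF on the memory BIO before the peer's
# close_notify can arrive, so the next shutdown attempt fails outright.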
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.get_state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
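# Give the server a session ID context; OpenSSL uses it to decide which
# cached sessions may be resumed by this context.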
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
# Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
The `Connection.get_finished` method returns the TLS Finished message sent
by this connection, whether client or server. Finished messages are sent
during the TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
The `Connection.get_peer_finished` method returns the TLS Finished
message received from the peer, whether client or server. Finished
messages are sent during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
The TLS Finished message sent by the server must be the TLS Finished
message received by the client.
The TLS Finished message sent by the client must be the TLS Finished
message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of native strings giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` raises `ValueError`, because SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
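"""
`Connection.recv_into` honours `socket.MSG_PEEK`: peeked data is copied
into the buffer but remains available for subsequent reads.
"""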
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
to it, raising a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls to an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL and intended only to be used as flags to
OpenSSL APIs. The only assertions that can reasonably be made about them
concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other, and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't
work on `OpenSSL.SSL.Connection` objects that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
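# Not a real (DER-encoded) OCSP response; the tests below only check that
# these opaque bytes travel through the stapling callbacks unchanged.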
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
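# Passing no socket argument gives the Connection a memory BIO, which the
# in-memory handshake helpers drive directly.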
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data as their
final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
        The server callback must return a bytestring, or a TypeError is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
|
test_set_multiple_ca_list
|
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
|
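# The docstring above describes a test whose body is not included here. The
# following is an illustrative sketch only, not the original implementation;
# the class-level helper ``_check_client_ca_list`` it relies on is an
# assumption borrowed from the surrounding client-CA tests.
#
#     def test_set_multiple_ca_list(self):
#         cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
#         secert = load_certificate(FILETYPE_PEM, server_cert_pem)
#         cadesc = cacert.get_subject()
#         sedesc = secert.get_subject()
#
#         def multiple_ca(ctx):
#             ca_list = [cadesc, sedesc]
#             ctx.set_client_ca_list(ca_list)
#             return ca_list
#
#         self._check_client_ca_list(multiple_ca)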
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
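    """
    Drive the TLS handshake between two socket-backed `Connection` objects,
    retrying each side on `WantReadError` until both report completion.
    """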
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
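# The two factories below are used by loopback(); each wraps a socket (or
# None, for memory BIOs) in a Connection configured as a TLS client or a TLS
# server respectively.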
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair, wrap each end in a TLS `Connection` using
    the given factories, and complete the handshake between them.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
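# The helper below is an illustrative sketch added for clarity; it is not an
# original part of the suite and is never invoked by any test.
def _in_memory_usage_sketch():  # pragma: nocover
    """
    Minimal sketch showing how the in-memory helpers above combine: perform a
    handshake through memory BIOs, then pass application data without sockets.
    """
    server = loopback_server_factory(socket=None)
    client = loopback_client_factory(socket=None)
    handshake_in_memory(client, server)
    client.send(b"hello")
    source, data = interact_in_memory(client, server)
    assert source is server
    assert data == b"hello"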
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
        (i.e., which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_SET_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that no errors are raised when passed empty lists or paths that
        do not exist.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
        # Make them talk to each other. Three passes over both sides are
        # enough for the handshake to complete across the socket pair.
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the advertise callback raises an exception, it is propagated
        # here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
            # If the server's select callback returns nothing, the connection
            # will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
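# A minimal, hypothetical sketch (not part of the test suite) of the ALPN flow
# exercised above: the client offers protocols, the server's select callback
# picks one, and both sides can then query get_alpn_proto_negotiated(). Only
# meaningful on OpenSSL builds where Cryptography_HAS_ALPN is true. The helper
# name is illustrative only.
def _example_alpn_contexts():  # pragma: nocover
    client_context = Context(TLSv1_METHOD)
    client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
    server_context = Context(TLSv1_METHOD)
    # Pick the first protocol the client offered.
    server_context.set_alpn_select_callback(lambda conn, options: options[0])
    server_context.use_privatekey(
        load_privatekey(FILETYPE_PEM, server_key_pem))
    server_context.use_certificate(
        load_certificate(FILETYPE_PEM, server_cert_pem))
    return client_context, server_context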
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent
        by the client or the server. Finished messages are sent during
        the TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or the server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
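# A minimal, hypothetical sketch (not part of the test suite) of how data is
# pumped between two memory-BIO Connections with bio_read/bio_write - the same
# idea the interact_in_memory() helper used throughout these tests is built
# on. The helper name is illustrative only.
def _example_pump_once(sender, receiver):  # pragma: nocover
    try:
        chunk = sender.bio_read(4096)
    except WantReadError:
        # The sender has nothing buffered for the network right now.
        return False
    receiver.bio_write(chunk)
    return True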
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with arguments other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed text, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
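        """
        `Connection.recv_into` peeks into the receive buffer without
        consuming the data when `socket.MSG_PEEK` is passed as `flags`.
        """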
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if that content is text rather
        than bytes.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
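# A minimal, hypothetical sketch (not part of the test suite) of the
# renegotiation flow driven above: renegotiate() schedules a new handshake,
# and both peers must run do_handshake() again for it to complete. The helper
# name is illustrative only.
def _example_renegotiate(server, client):  # pragma: nocover
    assert server.renegotiate()
    server.setblocking(False)
    client.setblocking(False)
    client.do_handshake()
    server.do_handshake()
    return server.total_renegotiations()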
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can reasonably be made about
    them concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
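# A minimal, hypothetical sketch (not part of the test suite) of how the
# session cache flags above are typically combined and applied to a context
# via Context.set_session_cache_mode(). The helper name is illustrative only.
def _example_session_cache_mode():  # pragma: nocover
    ctx = Context(TLSv1_METHOD)
    ctx.set_session_cache_mode(
        SESS_CACHE_SERVER | SESS_CACHE_NO_INTERNAL_STORE)
    return ctx.get_session_cache_mode()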
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on an `OpenSSL.SSL.Connection` that uses a socket.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
# MASKED: test_set_multiple_ca_list function (lines 3530-3548)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
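# A minimal, hypothetical sketch (not part of the test suite) of the
# server-side configuration the client-CA tests above exercise: the names
# installed with set_client_ca_list()/add_client_ca() are what a connected
# client later observes via Connection.get_client_ca_list(). verify_cb is the
# module-level verification callback used by the other tests in this file;
# the helper name itself is illustrative only.
def _example_client_ca_context():  # pragma: nocover
    cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
    ctx = Context(TLSv1_METHOD)
    ctx.set_verify(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
    ctx.set_client_ca_list([cacert.get_subject()])
    return ctx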
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
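# A minimal, hypothetical sketch (not part of the test suite) of an info
# callback built on the constants checked above; Context.set_info_callback()
# arranges for it to be invoked as the handshake and I/O progress. The helper
# name is illustrative only.
def _example_info_callback_context():  # pragma: nocover
    events = []
    def info(conn, where, ret):
        if where & SSL_CB_HANDSHAKE_DONE:
            events.append('handshake-done')
    ctx = Context(TLSv1_METHOD)
    ctx.set_info_callback(info)
    return ctx, events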
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
        Both callbacks receive an opaque, user-provided piece of data as
        their final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
        The server callback must return a bytestring, or a TypeError is
        raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
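    # Taken together, the tests above pin down the callback contract used in
    # this class: the server-side callback is called as
    # ``callback(connection, data)`` and must return the byte string to
    # staple into the handshake (``b''`` to staple nothing; a non-bytes
    # return value raises ``TypeError``), while the client-side callback is
    # called as ``callback(connection, ocsp_bytes, data)`` and returns
    # ``True`` to continue the handshake or ``False`` to abort it with an
    # ``Error``. Exceptions raised by either callback propagate out of
    # ``handshake_in_memory``.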
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
        clcert = load_certificate(FILETYPE_PEM, client_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
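    # ``_check_client_ca_list`` is defined elsewhere in this class and is not
    # reproduced here. As a rough, assumed sketch (names and details may
    # differ from the real helper), it behaves along these lines:
    #
    #     def _check_client_ca_list(self, func):
    #         server, client = self._server(None), self._client(None)
    #         expected = func(server.get_context())
    #         interact_in_memory(client, server)
    #         assert client.get_client_ca_list() == expected
    #         assert server.get_client_ca_list() == expected
    #
    # i.e. it applies ``func`` to the server's ``Context``, completes an
    # in-memory handshake, and checks ``Connection.get_client_ca_list`` on
    # both sides against the list that ``func`` returned, as the docstring
    # above describes.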
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair and force two connected SSL sockets
to talk to each other via memory BIOs.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate_wrong_args` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback returns a string
longer than the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_SET_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty arrays or paths that do not exist no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
called with other than one integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
protocol can correct establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
to use next that the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
correct establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
            # If the server's select callback returns an empty string, the
            # connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
        `Connection.get_state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
fail indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for
        # the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
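        # With the socket's send buffer already full, OpenSSL cannot flush
        # the ClientHello, so the handshake attempt surfaces the should-write
        # condition as WantWriteError.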
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent by the
        client or server. Finished messages are sent during the TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or server. Finished messages are sent during
        the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
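        # The handshake attempt above leaves the ClientHello queued in the
        # outgoing memory BIO, so bio_read(2) returns exactly the first two
        # of those bytes.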
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with something other than a string for its first
        argument, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
    def test_peek(self):
        """
        `Connection.recv_into` peeks at pending data without consuming it
        when `socket.MSG_PEEK` is passed via the `flags` argument.
        """
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
        When called with something other than a string for its first
        argument, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all the content of the string passed
        to it, raising a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that it seems can be made about them
    are their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
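        # This follows from the flag values above: SESS_CACHE_BOTH is the
        # bitwise OR of SESS_CACHE_CLIENT and SESS_CACHE_SERVER
        # (0x1 | 0x2 == 0x3).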
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
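    # Similarly, SESS_CACHE_NO_INTERNAL (tested next) is the bitwise OR of
    # SESS_CACHE_NO_INTERNAL_LOOKUP and SESS_CACHE_NO_INTERNAL_STORE
    # (0x100 | 0x200 == 0x300).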
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
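# The memory BIO tests below drive handshakes with the module's
# ``interact_in_memory``/``handshake_in_memory`` helpers (defined earlier in
# this file).  Purely as an illustrative sketch of the idea - not the actual
# helper and not used by any test - a minimal handshake pump over memory BIOs
# could look roughly like this (``_example_pump_handshake`` is a hypothetical
# name):
def _example_pump_handshake(client, server):  # pragma: nocover
    """
    Shuttle handshake bytes between two memory-BIO `Connection` objects
    until both report that their handshake has completed.
    """
    pending = [client, server]
    while pending:
        for conn in list(pending):
            try:
                conn.do_handshake()
            except WantReadError:
                # This side needs more bytes from its peer before it can
                # make progress.
                pass
            else:
                pending.remove(conn)
            # Deliver whatever this side produced to the peer's incoming BIO.
            peer = server if conn is client else client
            try:
                data = conn.bio_read(2 ** 16)
            except WantReadError:
                pass
            else:
                peer.bio_write(data)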
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
        Two `Connection`s which use memory BIOs can be manually connected by
        reading from the output of each and writing those bytes to the input
        of the other. In this way they can establish a connection and exchange
        application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
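        # (This is the TLS keying material exporter of RFC 5705: both peers
        # derive the same bytes for a given label and context, as the
        # assertions below check.)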
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `OpenSSL.SSL.Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
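        # In a real deployment the server callback would return a DER-encoded
        # OCSP response obtained from the CA's responder; these tests only
        # care that the bytes round-trip, so arbitrary data is used instead.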
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data as their
final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a `TypeError` is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
|
test_set_after_add_client_ca
|
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA names queued by
`add_client_ca` with the names passed to `set_client_ca_list`.
|
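As a rough illustration of the behaviour described above, a test along the following lines could exercise it. This is an editorial sketch only, not the masked implementation; it assumes a `_check_client_ca_list` helper of the kind used elsewhere in this suite, which would perform a handshake and compare the returned names against `Connection.get_client_ca_list` on both sides.

def test_set_after_add_client_ca_sketch(self):
    # Hedged sketch: load certificates whose subjects can serve as CA names.
    cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
    clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
    cadesc = cacert.get_subject()

    def replace_with_set(ctx):
        # add_client_ca queues one CA name; set_client_ca_list then
        # replaces the whole queued list with the names it is given.
        ctx.add_client_ca(clcert)
        ctx.set_client_ca_list([cadesc])
        return [cadesc]

    # Assumed helper (hypothetical here): handshakes and checks the list.
    self._check_client_ca_list(replace_with_set)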
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair and force two connected SSL sockets
to talk to each other over that socket pair.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
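# Illustrative sketch (an editorial addition, not part of the original
# suite): one common reading of the pre-3.0 OpenSSL 0xMNNFFPPS layout of
# the OPENSSL_VERSION_NUMBER integer checked above. The underscore names
# below are assumptions used only for illustration.
_status = OPENSSL_VERSION_NUMBER & 0xF
_patch = (OPENSSL_VERSION_NUMBER >> 4) & 0xFF
_fix = (OPENSSL_VERSION_NUMBER >> 12) & 0xFF
_minor = (OPENSSL_VERSION_NUMBER >> 20) & 0xFF
_major = (OPENSSL_VERSION_NUMBER >> 28) & 0xF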
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(i.e., which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback is longer than the
indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if the path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if the path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if called with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is, and we disable
SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty arrays or paths that do not exist, no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't have __init__ run, so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if its single
argument is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails, it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails, no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
            Clients and servers that agree on the negotiated ALPN protocol
            can correctly establish a connection, and the agreed protocol is
            reported by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
            When clients and servers cannot agree on what protocol to use
            next, the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
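        # The first shutdown() sent a close_notify but no reply has been read
        # yet, so another shutdown() needs to read and raises WantReadError;
        # shutting down the read BIO then turns the truncation into an Error.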
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for
        # the very last bit of available buffer space.
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent
        by the client or the server. Finished messages are sent during
        the TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or the server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
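        # The handshake attempt wrote a ClientHello into the outgoing memory
        # BIO, so bio_read has bytes available to return.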
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with an argument other than a string for its first
        parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
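        """
        `Connection.recv_into` called with `socket.MSG_PEEK` in `flags`
        copies available bytes into the buffer but leaves them in the
        receive buffer for a later read.
        """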
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
        When called with an argument other than a string for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if passed a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
        even if this requires multiple calls to an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
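        # Put both sides in non-blocking mode so driving the renegotiation
        # handshake below does not block.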
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can reasonably be made about
    them concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't
        work on `OpenSSL.SSL.Connection` objects that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
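        # Before the handshake only the server reflects the configured CA
        # names; the client learns them during the handshake below.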
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
    def test_set_after_add_client_ca(self):
        """
        A call to `Context.set_client_ca_list` after a call to
        `Context.add_client_ca` replaces the CA name specified by the
        former call with the names specified by the latter call.
        """
        cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
        secert = load_certificate(FILETYPE_PEM, server_cert_pem)
        clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
        cadesc = cacert.get_subject()
        sedesc = secert.get_subject()
        def set_replaces_add_ca(ctx):
            ctx.add_client_ca(clcert)
            ctx.set_client_ca_list([cadesc])
            ctx.add_client_ca(secert)
            return [cadesc, sedesc]
        self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
        Both callbacks receive an opaque, user-provided piece of data as
        their final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
        The server callback must return a bytestring, or a TypeError is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
def test_set_after_add_client_ca(self):
"""
        A call to `Context.set_client_ca_list` after a call to
        `Context.add_client_ca` replaces the CA name added by
        `Context.add_client_ca` with the names passed to
        `Context.set_client_ca_list`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
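# A sketch (with placeholder context names) of how socket_pair() and
# handshake() are usually combined; loopback() further below does exactly
# this with real context factories:
#
#     server_sock, client_sock = socket_pair()
#     server = Connection(server_context, server_sock)
#     server.set_accept_state()
#     client = Connection(client_context, client_sock)
#     client.set_connect_state()
#     handshake(client, server)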
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
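# Illustrative sketch only (not used by the tests): the chain above is
# typically wired into a pair of contexts like this, with the intermediate
# certificate supplied via ``add_extra_chain_cert`` -- see
# ``test_add_extra_chain_cert`` below for the full, file-based version.
def _example_chain_contexts():
    [(_cakey, cacert), (_ikey, icert), (skey, scert)] = \
        _create_certificate_chain()
    server_ctx = Context(TLSv1_METHOD)
    server_ctx.use_privatekey(skey)
    server_ctx.use_certificate(scert)
    # Send the intermediate alongside the leaf so a peer that only trusts
    # cacert can still build a complete verification path.
    server_ctx.add_extra_chain_cert(icert)
    client_ctx = Context(TLSv1_METHOD)
    client_ctx.set_verify(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
    # A real client would additionally install cacert as a trust root, for
    # example by dumping it to a PEM file and calling load_verify_locations.
    return server_ctx, client_ctx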
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair and force two connected SSL sockets
to talk to each other via memory BIOs.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
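# Typical use of the ``loopback`` helper (a sketch; individual tests layer
# their own assertions on top):
#
#     server, client = loopback()
#     client.send(b"hello")
#     assert server.recv(5) == b"hello"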
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
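# Illustrative sketch (not part of the test suite itself): the two helpers
# above combine to drive a complete TLS exchange purely in memory.  Passing
# ``None`` as the socket gives each ``Connection`` a pair of memory BIOs, so
# no real network I/O is involved.
def _example_memory_bio_exchange():
    server_ctx = Context(SSLv23_METHOD)
    server_ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
    server_ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
    server = Connection(server_ctx, None)
    client = Connection(Context(SSLv23_METHOD), None)
    # handshake_in_memory sets the connect/accept states and pumps bytes
    # between the two memory BIOs until the handshake completes.
    handshake_in_memory(client, server)
    client.send(b"ping")
    # interact_in_memory then shuttles the resulting ciphertext across and
    # returns (connection, data) once one side delivers application bytes.
    source, data = interact_in_memory(client, server)
    assert source is server and data == b"ping"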
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when not passed
        exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
        `Context.use_certificate` raises `OpenSSL.SSL.Error` when passed an
        `OpenSSL.crypto.X509` instance which has not been initialized
        (i.e., which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
        `Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections
        created using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
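        # (OpenSSL hands the callback a fixed buffer -- PEM_BUFSIZE, 1024
        # bytes in stock OpenSSL -- which is where the magic number below
        # comes from.)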
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
        Verify that if a path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
        Verify that if a path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
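        # (For reference, such names are derived from the certificate's
        # subject-name hash; something like
        # "%08x.0" % cert.subject_name_hash() would reproduce them, but
        # hard-coding both known spellings keeps this test self-contained.)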
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
        SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that no errors are raised when passed empty lists or paths that
        do not exist.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
        We had a bug where the X509 cert instantiated in the callback wrapper
        was not run through __init__, so it was missing attributes needed when
        calling get_subject. This test sets up a handshake where we call
        get_subject on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
        `Context.use_certificate_chain_file` raises `TypeError` if passed
        anything other than a byte string as its single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
        `Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
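# Illustrative sketch (names below are hypothetical, not part of the suite):
# the servername callback is commonly used to switch to a per-hostname
# Context during the handshake via ``Connection.set_context``.
def _example_sni_dispatch(contexts_by_hostname, default_context):
    def pick_context(connection):
        name = connection.get_servername()
        chosen = contexts_by_hostname.get(name)
        if chosen is not None:
            connection.set_context(chosen)
    default_context.set_tlsext_servername_callback(pick_context)
    return default_context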
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the advertise callback raises an exception, it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
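# A rough sketch of the NPN flow exercised above (illustrative only, not
# executed): in NPN it is the *server* that advertises a protocol list and
# the *client* that selects from it - the mirror image of ALPN below.
#
#     server_context.set_npn_advertise_callback(
#         lambda conn: [b'http/1.1', b'spdy/2'])
#     client_context.set_npn_select_callback(
#         lambda conn, options: options[0])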
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
correctly establish a connection, and the agreed protocol is
reported by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Set up the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
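# Aside (based on the ALPN wire format, not on anything asserted in these
# tests): the protocol list handed to set_alpn_protos() is serialized into
# the ClientHello as length-prefixed byte strings, roughly
#     [b'http/1.1', b'spdy/2']  ->  b'\x08http/1.1\x06spdy/2'
# and the server-side select callback receives it decoded back into the
# Python list seen in the assertions above.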
def test_alpn_server_fail(self):
"""
When the client and server cannot agree on what protocol to use
next, the TLS connection is not established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not supported by the linked OpenSSL, `NotImplementedError` is raised.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
which the connected server presented for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
# always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
# Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
`Connection.get_finished` returns the TLS Finished message sent by
the client or server. Finished messages are sent during the TLS
handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
`Connection.get_peer_finished` returns the TLS Finished message
received from the client or server. Finished messages are sent
during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
The TLS Finished message sent by the server must be the TLS Finished
message received by the client, and the TLS Finished message sent by
the client must be the TLS Finished message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
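# Connection.send rejects buffers whose reported length exceeds what
# SSL_write's C int length parameter can hold; VeryLarge claims a length of
# 2**31 so that check trips without actually allocating two gigabytes (see
# test_buf_too_large below).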
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
When called with something other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also emits a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
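# For example (illustrative, not executed): _make_memoryview(5) is a writable
# five-byte view over b'\x00' * 5, which recv_into can fill in place.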
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
"""
`Connection.recv_into` peeks into the receive buffer, without
consuming the data, when `socket.MSG_PEEK` is passed as `flags`.
"""
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with something other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all of the content in the string passed
to it, emitting a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
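# A sketch of the renegotiation dance driven above (TLS <= 1.2 only, which
# is what the test pins with TLSv1_2_METHOD): renegotiate() queues a new
# handshake and returns True, renegotiate_pending() stays True until that
# handshake completes, and both sides must run do_handshake() again before
# total_renegotiations() reaches 1.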
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL and intended only to be used as
flags to OpenSSL APIs. The only assertions that can be made about them,
it seems, concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
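# Passing None as the transport above makes the Connection use a pair of
# memory BIOs rather than a socket, so bytes must be shuttled by hand,
# roughly:
#
#     pending = client_conn.bio_read(4096)   # TLS bytes the client wants sent
#     server_conn.bio_write(pending)         # deliver them to the server
#
# which is exactly the loop that the interact_in_memory() helper runs for
# these tests.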
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
Test that `Connection.bio_read` and `Connection.bio_write` don't
work on `OpenSSL.SSL.Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
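# The CA names configured on the server context are carried in the server's
# CertificateRequest handshake message, which is why the client's
# get_client_ca_list() above only becomes non-empty after interact_in_memory()
# has completed the handshake.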
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available, the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available, the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
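# The OCSP stapling flow these helpers set up, in outline: the client calls
# request_ocsp() so its ClientHello carries the status_request extension, the
# server callback returns the (typically DER-encoded) OCSP response bytes to
# staple, and the client callback receives those bytes and returns True to
# continue the handshake or False to abort it.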
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data as their
final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
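# The tests above exercise the OCSP stapling callbacks piecewise. Purely as an
# illustrative sketch (not part of the test suite; the helper name and the
# placeholder staple bytes are assumptions), this is how the same callback
# APIs fit together end to end for an in-memory handshake.
def _example_ocsp_stapling_setup(staple=b"placeholder OCSP response"):
    def server_ocsp(conn, data):
        # Return the DER-encoded OCSP response to staple onto the handshake.
        return staple

    def client_ocsp(conn, ocsp_bytes, data):
        # Returning True accepts the stapled response; False aborts.
        return ocsp_bytes == staple

    server_ctx = Context(SSLv23_METHOD)
    server_ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
    server_ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
    server_ctx.set_ocsp_server_callback(server_ocsp)
    client_ctx = Context(SSLv23_METHOD)
    client_ctx.set_ocsp_client_callback(client_ocsp)
    server = Connection(server_ctx, None)
    client = Connection(client_ctx, None)
    # The client must ask for stapling before the handshake begins.
    client.request_ocsp()
    # handshake_in_memory sets connect/accept state and shuttles the records.
    handshake_in_memory(client, server)
    return client, server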
|
test_integers
|
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
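# A minimal, illustrative sketch of how the chain returned above might be
# plugged into a pair of contexts: the server presents scert plus the
# intermediate, while the client trusts only cacert. This mirrors what
# test_add_extra_chain_cert does later; the helper name is made up here.
def _example_contexts_from_chain():
    [(cakey, cacert), (ikey, icert), (skey, scert)] = _create_certificate_chain()
    server_ctx = Context(TLSv1_METHOD)
    server_ctx.use_privatekey(skey)
    server_ctx.use_certificate(scert)
    # Send the intermediate so the client can build a path up to cacert.
    server_ctx.add_extra_chain_cert(icert)
    client_ctx = Context(TLSv1_METHOD)
    # Trust the root directly via the context's certificate store.
    client_ctx.get_cert_store().add_cert(cacert)
    client_ctx.set_verify(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
    return server_ctx, client_ctx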
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair, wrap each end in an SSL Connection via the
given factories, and complete the TLS handshake between the two connections.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
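# For reference, a hedged sketch (the helper name and payloads are made up)
# of how the loopback() helper above is typically consumed: it yields two
# blocking, fully handshaken Connection objects that can exchange data.
def _example_loopback_roundtrip():
    server, client = loopback()
    client.send(b"ping")
    assert server.recv(1024) == b"ping"
    server.send(b"pong")
    assert client.recv(1024) == b"pong"
    return server, client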
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
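# A small sketch (the function name and echo payload are assumptions) of
# driving two socket-less Connections entirely through memory BIOs with the
# helpers above: handshake first, then shuttle application data with
# interact_in_memory.
def _example_memory_bio_session():
    server = loopback_server_factory(None)
    client = loopback_client_factory(None)
    handshake_in_memory(client, server)
    client.send(b"hello over memory BIOs")
    source, data = interact_in_memory(client, server)
    # The bytes surface on the server side of the pair.
    assert source is server
    assert data == b"hello over memory BIOs"
    return client, server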
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with the release status in the low
nibble and the patch, fix, minor, and major versions in the nibbles above
that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
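# Purely illustrative (not part of the test suite): one way to unpack
# OPENSSL_VERSION_NUMBER, which pre-3.0 OpenSSL encodes as 0xMNNFFPPS -
# major, minor, fix and patch in the high nibbles and the release status in
# the lowest nibble. The helper name and return format are assumptions.
def _example_decode_version_number(number=None):
    if number is None:
        number = OPENSSL_VERSION_NUMBER
    status = number & 0xF
    patch = (number >> 4) & 0xFF
    fix = (number >> 12) & 0xFF
    minor = (number >> 20) & 0xFF
    major = (number >> 28) & 0xF
    return major, minor, fix, patch, status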
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
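# A short, assumed usage sketch for the fixture above: the path it returns
# can be handed straight to Context.load_verify_locations so the generated
# self-signed certificate becomes a trust root for peer verification.
def _example_trust_generated_ca(ca_path):
    ctx = Context(TLSv1_METHOD)
    ctx.load_verify_locations(ca_path)
    ctx.set_verify(
        VERIFY_PEER,
        lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
    )
    return ctx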
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback is longer than the
indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if called with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty lists or paths that do not exist, no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 certificate instantiated in the verify
callback wrapper was not initialized via __init__, so it lacked the
attributes needed by get_subject. This test sets up a handshake and calls
get_subject on the certificate provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Tests for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol to use
next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
        Test that we can handle exceptions in the advertise callback. If
        advertise fails, no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the server's advertise callback raises an exception, it should
        # be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
            Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is
            reported by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
            # If the server's select callback returns no protocol, the
            # connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
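        # The connect is still in progress when connect_ex returns, so instead
        # of 0 we expect an "in progress" / "would block" errno.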
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
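        # The errno for a write on a shut-down socket differs by platform:
        # Windows reports ESHUTDOWN, POSIX platforms report EPIPE.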
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
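        # Signal EOF on the incoming memory BIO to simulate the peer
        # disappearing before it sends its own close_notify.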
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
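        # The exact wording of the pre-handshake state string differs between
        # OpenSSL versions, so accept either form.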
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
        `Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
is using, a `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
        `OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
        # signal a short write via its return value, it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent by
        the client or server. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
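        # The attempted handshake wrote a ClientHello into the outgoing memory
        # BIO, so bio_read now has bytes available to return.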
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with an argument other than a string for its first
        parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
        returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
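        """
        `Connection.recv_into` peeks into the receive buffer without consuming
        the data when `socket.MSG_PEEK` is passed, so repeated peeks see the
        same bytes.
        """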
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
        When called with an argument other than a string for its first
        parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if that string is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
    These are values defined by OpenSSL, intended only to be used as flags to
    OpenSSL APIs. The only assertions that can sensibly be made about them
    concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
        Two `Connection`s which use memory BIOs can be manually connected by
        reading from the output of each and writing those bytes to the input
        of the other; in this way they establish a connection and exchange
        application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `OpenSSL.SSL.bio_read` and `OpenSSL.SSL.bio_write` don't
        work on `OpenSSL.SSL.Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
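        # Before the handshake only the server knows the configured CA names;
        # the client still sees an empty list.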
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
    def test_integers(self):
        """
        All of the info constants are integers.

        This is a very weak test.  It would be nice to have one that actually
        verifies that as certain info events happen, the value passed to the
        info callback matches up with the constant exposed by OpenSSL.SSL.
        """
        for const in [
            SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
            SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
            SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
            SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
            SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
        ]:
            assert isinstance(const, int)

        # These constants don't exist on OpenSSL 1.1.0
        for const in [
            SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
        ]:
            assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
# Implementation of the masked test_integers function (lines 3673-3694):
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
| 3,673 | 3,694 |
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
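# verify_cb has the signature OpenSSL expects of a verification callback:
# (connection, x509, error_number, error_depth, preverify_ok). Returning the
# preverify result unchanged simply accepts whatever OpenSSL itself decided.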
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
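# A short usage sketch for socket_pair() (illustrative only). The returned
# sockets are non-blocking, so switch them back for simple blocking I/O:
#
#   server_sock, client_sock = socket_pair()
#   server_sock.setblocking(True)
#   client_sock.setblocking(True)
#   client_sock.send(b"ping")
#   assert server_sock.recv(1024) == b"ping"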
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
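def _example_chain_server_context():  # pragma: nocover
    """
    Illustrative sketch (a hypothetical helper, not used by the tests):
    configure a server Context with the leaf certificate from
    `_create_certificate_chain` and present the intermediate certificate as an
    extra chain certificate, mirroring what `test_add_extra_chain_cert`
    exercises.
    """
    [(cakey, cacert), (ikey, icert), (skey, scert)] = _create_certificate_chain()
    ctx = Context(TLSv1_METHOD)
    ctx.use_privatekey(skey)
    ctx.use_certificate(scert)
    ctx.add_extra_chain_cert(icert)
    return ctx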
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair, wrap each end in an SSL `Connection`, and
complete the TLS handshake between the two over those sockets.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
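def _example_loopback_usage():  # pragma: nocover
    """
    Illustrative sketch (a hypothetical helper, not used by the tests):
    `loopback()` returns two already-handshaked Connection objects that can
    exchange application data over a real socket pair.
    """
    server, client = loopback()
    client.send(b"hello")
    assert server.recv(1024) == b"hello"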
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
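def _example_memory_bio_usage():  # pragma: nocover
    """
    Illustrative sketch (a hypothetical helper, not used by the tests): drive
    a TLS handshake and a one-way data transfer entirely through memory BIOs,
    using the factories and pump helpers defined above.
    """
    server = loopback_server_factory(socket=None)
    client = loopback_client_factory(socket=None)
    handshake_in_memory(client, server)
    client.send(b"hello")
    source, data = interact_in_memory(client, server)
    assert source is server and data == b"hello"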
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
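# A worked example of that layout (illustrative): OpenSSL 1.0.2g is encoded
# as 0x1000207f: major 1, minor 0x00, fix 0x02, patch 0x07 ('g'), and
# release status 0xf in the lowest nibble.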
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback is longer than the
indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
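# Shape of the passphrase callback exercised by the tests above (a sketch,
# not part of the suite): it receives the maximum passphrase length, a flag
# saying whether the passphrase will be prompted for twice for verification,
# and the optional user data, and must return bytes.
#
#   def example_passwd_cb(maxlen, prompt_twice, userdata):
#       return b"secret passphrase"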
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if a path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if a path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if called with
non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty arrays or paths that do not exist no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
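# The fixed three pump iterations below give each side several turns at
# do_handshake() so the handshakes exercised by these tests can finish
# (or fail with the expected error).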
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
single argument which is not a string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
        select fails, it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
        advertise fails, no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the server's advertise callback fails, the exception is raised
        # here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
        correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
        When clients and servers cannot agree on what protocol to use next,
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the server's select callback returns no protocol, the connection
        # will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
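# The ALPN tests above drive negotiation through the in-memory helpers. As a
# rough, illustrative sketch of the same API in application code (not part of
# the test suite), the function below configures a client Context for ALPN
# and reports the protocol the server selected; ``sock`` stands for a
# hypothetical, already-connected TCP socket supplied by the caller.
def _example_alpn_client(sock):  # pragma: nocover
    ctx = Context(TLSv1_METHOD)
    # Offer protocols in preference order; the server's ALPN select callback
    # chooses one of them (or rejects the handshake).
    ctx.set_alpn_protos([b'h2', b'http/1.1'])
    conn = Connection(ctx, sock)
    conn.set_connect_state()
    conn.do_handshake()
    # An empty byte string means the server did not negotiate ALPN at all.
    return conn.get_alpn_proto_negotiated()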
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
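# Several tests in TestConnection below (test_wantReadError,
# test_wantWriteError) check that I/O methods raise WantReadError or
# WantWriteError rather than blocking. Purely as a hedged sketch of how an
# application with a non-blocking socket typically reacts to those
# exceptions, ``wait_readable`` and ``wait_writable`` are hypothetical
# helpers that block until the underlying socket is ready.
def _example_nonblocking_recv(conn, wait_readable, wait_writable):  # pragma: nocover
    while True:
        try:
            return conn.recv(4096)
        except WantReadError:
            wait_readable(conn)
        except WantWriteError:
            # A renegotiation can make recv() need to write before it can
            # make progress.
            wait_writable(conn)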
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
        `Connection.get_state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for
        # the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent by
        this side of the connection. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the peer. Finished messages are sent during the TLS
        handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
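# test_client_set_session above verifies session re-use indirectly via the
# master key. As an illustrative, hedged sketch only (not a test), this is
# the resumption pattern from the client's point of view; ``connect_socket``
# is a hypothetical callable that returns a freshly connected TCP socket to
# the same server on each call.
def _example_resume_session(ctx, connect_socket):  # pragma: nocover
    # First connection: complete a handshake and capture its session.
    first = Connection(ctx, connect_socket())
    first.set_connect_state()
    first.do_handshake()
    session = first.get_session()
    # Second connection: install the saved session *before* handshaking so
    # that OpenSSL offers it to the server for resumption.
    second = Connection(ctx, connect_socket())
    second.set_connect_state()
    second.set_session(session)
    second.do_handshake()
    return second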
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
        `Connection.get_cipher_list` returns a list of native strings giving
        the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with something other than a string for its first
        parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed text, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
        `Connection.send` raises `ValueError`, as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
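        """
        `Connection.recv_into` peeks into the connection if `socket.MSG_PEEK`
        is passed as `flags`, leaving the peeked data available to later
        reads.
        """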
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
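# The tests above make single calls to Connection.recv_into. As a minimal
# sketch (assuming a handshaken Connection over a blocking socket; not part
# of the test suite), this is how an application might reuse one preallocated
# buffer across reads; ``handle`` is a hypothetical consumer of the received
# bytes.
def _example_recv_into_loop(conn, handle):  # pragma: nocover
    buf = bytearray(16384)
    view = memoryview(buf)
    while True:
        try:
            count = conn.recv_into(buf)
        except ZeroReturnError:
            # The peer performed a clean TLS shutdown.
            break
        handle(view[:count])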
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
        When called with something other than a string for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if it is text rather than bytes.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
        even if this requires multiple calls to an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
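# test_long above relies on Connection.sendall looping over partial writes
# internally, and TestMemoryBIO.test_outgoing_overflow below shows
# Connection.send returning a short count. Purely as a sketch of what such a
# loop looks like when written by hand against a blocking socket:
def _example_send_all(conn, payload):  # pragma: nocover
    view = memoryview(payload)
    while len(view):
        sent = conn.send(view)
        view = view[sent:]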
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can usefully be made about them
    concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
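# The TestMemoryBIO tests below drive two memory-BIO Connections by shuttling
# bytes between them with the interact_in_memory() helper used throughout
# this module. The function below is a simplified, illustrative version of
# that idea restricted to the handshake (a sketch only, not used by the
# tests): each side makes whatever progress it can, then any bytes it
# produced are copied into its peer's input BIO, until nothing moves.
def _example_memory_bio_handshake(client_conn, server_conn):  # pragma: nocover
    while True:
        for conn in (client_conn, server_conn):
            try:
                conn.do_handshake()
            except WantReadError:
                # This side needs bytes from its peer before it can continue.
                pass
        moved = False
        for source, sink in ((client_conn, server_conn),
                             (server_conn, client_conn)):
            try:
                data = source.bio_read(65536)
            except WantReadError:
                # Nothing waiting in this side's outgoing BIO.
                continue
            if data:
                sink.bio_write(data)
                moved = True
        if not moved:
            break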
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
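# The client CA tests above focus on which CA names set_client_ca_list() and
# add_client_ca() cause the server to advertise. As a hedged sketch of a
# typical server-side configuration that combines this with client
# certificate verification (illustrative only, not part of the test suite),
# ``ca_pem`` stands for a PEM-encoded CA certificate trusted for client
# certificates:
def _example_server_requests_client_cert(ca_pem):  # pragma: nocover
    cacert = load_certificate(FILETYPE_PEM, ca_pem)
    ctx = Context(TLSv1_METHOD)
    # Trust the CA when verifying whatever certificate the client presents.
    ctx.get_cert_store().add_cert(cacert)
    # Require a client certificate and fail the handshake if none is sent.
    ctx.set_verify(
        VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT,
        lambda conn, cert, errnum, depth, ok: ok,
    )
    # Advertise the CA's subject name so clients know which certificate to
    # offer; add_client_ca(cacert) would have the same effect here.
    ctx.set_client_ca_list([cacert.get_subject()])
    return ctx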
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
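# TestInfoConstants above only checks that the SSL_CB_* / SSL_ST_* constants
# are integers. As an illustration of what they are for (a sketch, not a
# test), the function below attaches a logging info callback to a Context;
# ``log`` is a hypothetical callable such as a bound logger method.
def _example_info_callback(ctx, log):  # pragma: nocover
    def info(conn, where, ret):
        # ``where`` is a bitmask built from the SSL_CB_* constants.
        if where & SSL_CB_HANDSHAKE_START:
            log("handshake started")
        if where & SSL_CB_HANDSHAKE_DONE:
            log("handshake finished")
        if where & SSL_CB_ALERT:
            direction = "read" if where & SSL_CB_READ else "written"
            log("alert %s (code %d)" % (direction, ret))
    ctx.set_info_callback(info)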
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
        When the OpenSSL functionality is available, the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
        When the OpenSSL functionality is not available, the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
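    # The tests below pair one of these client/server connections and drive
    # the TLS handshake entirely in memory via handshake_in_memory(), so no
    # real sockets are involved.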
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
        Both callbacks receive the opaque, user-provided data object as
        their final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
        The server callback must return a bytestring, or a TypeError is
        raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair and force two connected SSL sockets
to talk to each other via memory BIOs.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate_wrong_args` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback returns a string
longer than the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_SET_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty arrays or paths that do not exist no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
called with other than one integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp' enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
protocol can correct establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
to use next that the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the advertise callback raises an exception, it should be
        # raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
            Clients and servers that agree on the negotiated ALPN protocol
            can correctly establish a connection, and the agreed protocol is
            reported by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
            When clients and servers cannot agree on what protocol to use
            next, the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
        `Connection.recv` returns pending bytes without consuming them when
        `socket.MSG_PEEK` is passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
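        # Server-side session resumption requires a session id context to be
        # set on the server's context.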
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for
        # the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent by
        the client or the server. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or the server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
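        # Kick off the handshake; with a memory BIO it cannot complete, but
        # it leaves the outgoing ClientHello in the BIO so that bio_read
        # below has bytes to return.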
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
        `Connection.get_cipher_list` returns a list of native strings giving
        the names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with arguments other than a string for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
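        """
        `Connection.recv_into` with `MSG_PEEK` fills the buffer with pending
        data without consuming it, so repeated peeks see the same bytes.
        """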
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if the argument is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
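        # Drive the handshake on both sides so the renegotiation actually
        # takes place.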
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can be made about them, it
    seems, are their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `OpenSSL.SSL.Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
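        # Do the handshake first so that only application data is in play
        # below.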
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
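        # Until the handshake happens, only the server side reflects the
        # configured CA names; the client has not received them yet.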
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
        A call to `Context.set_client_ca_list` after a call to
        `Context.add_client_ca` replaces the CA names specified by the
        earlier `add_client_ca` call with the names specified by the
        `set_client_ca_list` call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
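    # The stapled OCSP response is treated as opaque bytes throughout these
    # tests; nothing here parses or validates a real OCSP structure. A minimal
    # application-side sketch (hypothetical names: server_ctx, client_ctx,
    # client_conn, and ocsp_bytes holding a DER-encoded response) might be:
    #
    #     server_ctx.set_ocsp_server_callback(lambda conn, data: ocsp_bytes)
    #     client_ctx.set_ocsp_client_callback(
    #         lambda conn, ocsp, data: len(ocsp) > 0)
    #     client_conn.request_ocsp()  # ask the server to staple a response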
# MASKED: _client_connection function (lines 3741-3759)
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is thrown.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
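# Default verify callback used throughout these tests: simply propagate the
# result of OpenSSL's built-in certificate verification.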
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
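# Alternate do_handshake() calls between the two non-blocking connections until
# neither side raises WantReadError, i.e. until the TLS handshake is complete.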
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair, wrap each end in an SSL `Connection`, and
    complete the TLS handshake between them over the sockets.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
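    # A single do_handshake attempt per side kicks things off (the client
    # writes its ClientHello into its outgoing memory BIO); interact_in_memory
    # then ferries the remaining handshake records back and forth.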
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the string returned by the passphrase callback is longer than the
        indicated maximum length, it is truncated.
"""
# A priori knowledge!
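        # (1024 matches OpenSSL's PEM passphrase buffer size, PEM_BUFSIZE.)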
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
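        # The directory lookup expects files named <subject-name-hash>.<n>;
        # the hash algorithm changed between those releases, hence two names.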
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
        SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
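        # With SSL_CTX_set_default_verify_paths neutered above, any
        # certificates that end up in the store must have been loaded via the
        # fallback file/directory.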
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that when passed empty lists or paths that do not exist, no
        errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
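        # Three alternating do_handshake passes are enough for a full TLS
        # handshake to complete over the non-blocking socket pair.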
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
didn't __init__ so it was missing objects needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
        `Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails, it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails, no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
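# Illustrative sketch only: the NPN callback contract exercised by the
# tests above. The server-side advertise callback returns the list of
# protocols it supports; the client-side select callback receives that
# list and returns the single protocol it picks (returning b'' refuses,
# which aborts the handshake as test_npn_client_fail shows). NPN may be
# unavailable in some OpenSSL builds, in which case these setters raise
# NotImplementedError.
def _example_npn_contexts():
    server_context = Context(TLSv1_METHOD)
    server_context.set_npn_advertise_callback(
        lambda conn: [b'http/1.1', b'spdy/2'])
    client_context = Context(TLSv1_METHOD)
    client_context.set_npn_select_callback(
        lambda conn, protocols: protocols[0])
    return server_context, client_context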
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
When clients and servers cannot agree on what protocol to use next
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
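# Illustrative sketch only: as the test above shows, old OpenSSLs without
# ALPN make `set_alpn_protos` raise NotImplementedError, so an application
# can feature-detect ALPN at runtime and fall back to not negotiating a
# protocol.
def _example_alpn_or_fallback(context, protocols):
    try:
        context.set_alpn_protos(protocols)
        return True
    except NotImplementedError:
        # Older OpenSSL without ALPN; proceed without protocol negotiation.
        return False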
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
is using, a `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
# Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
`Connection.get_finished` returns the TLS Finished message sent
by the client or the server. Finished messages are sent during the
TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
`Connection.get_peer_finished` returns the TLS Finished
message received from the client or the server. Finished messages
are sent during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
The TLS Finished message sent by the server must be the TLS Finished
message received by the client.
The TLS Finished message sent by the client must be the TLS Finished
message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
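# Illustrative sketch only: the pattern the in-memory helpers used above
# rely on. With no socket attached, handshake bytes accumulate in the
# Connection's memory BIO; they are drained with bio_read and fed to the
# peer with bio_write until neither side has anything more to say.
def _example_pump_once(sender, receiver):
    try:
        pending = sender.bio_read(2 ** 16)
    except WantReadError:
        # Nothing buffered on this side right now.
        return False
    receiver.bio_write(pending)
    return True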
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
When called with an argument other than a string for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
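"""
`Connection.recv_into` honors the `MSG_PEEK` flag: peeked data stays
in the receive buffer, so repeated peeks return the same bytes.
"""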
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
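# Illustrative sketch only: a caller reusing one preallocated buffer can
# bound each call with `nbytes` (or a memoryview slice), as the tests
# above demonstrate, to read an exact amount without over-filling.
def _example_recv_exactly(connection, amount):
    buf = bytearray(amount)
    view = memoryview(buf)
    received = 0
    while received < amount:
        received += connection.recv_into(view[received:], amount - received)
    return bytes(buf)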
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with an argument other than a string for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
to it, raising a DeprecationWarning if it is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
even if this requires multiple calls of an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
OpenSSL APIs. The only assertions that can be made about them, it
seems, concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
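# Illustrative sketch only: the SESS_CACHE_* flags above are meant to be
# OR'd together and handed to Context.set_session_cache_mode. For example,
# a server that caches sessions itself but skips OpenSSL's internal store
# might combine them like this.
def _example_session_cache_context():
    context = Context(TLSv1_METHOD)
    context.set_session_cache_mode(
        SESS_CACHE_SERVER | SESS_CACHE_NO_INTERNAL_STORE)
    return context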
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
Test that `Connection.bio_read` and `Connection.bio_write` don't
work on `OpenSSL.SSL.Connection` objects that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` after a call to
`Context.add_client_ca` replaces the CA name specified by the
former call with the names specified by the latter call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is thrown.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
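    # Prefer an IPv4 socket, but fall back to IPv6 if this host has no IPv4
    # support.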
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
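    # Alternate do_handshake() between the two sides; WantReadError just
    # means a side is waiting for its peer's next flight, so keep looping
    # until both sides have completed the handshake.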
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
    Create a connected socket pair and force two connected SSL sockets
    to talk to each other over that socket pair.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
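    # Shuttle the TLS handshake records between the two connections'
    # memory BIOs until neither side has anything left to send.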
interact_in_memory(client_conn, server_conn)
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
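    # Make the certificate valid for a one-day window on either side of
    # "now" so the generated CA is immediately usable in tests.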
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
        `Context.use_certificate` raises `TypeError` when not passed
exactly one `OpenSSL.crypto.X509` instance as an argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(ie, which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
        `unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
        If the passphrase returned by the passphrase callback is longer than
        the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
        `Context.load_verify_locations` raises `TypeError` if called with
        non-`str` arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
        SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
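        # Neutralize the real default-path loading so that any certificates
        # that end up in the store must have been loaded via the fallback
        # file/dir.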
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
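        # If the fallback loading worked, the context's certificate store
        # should now contain at least one certificate.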
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
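        # Now replace the fallback with something that raises; the next call
        # must not reach it because the standard env vars are set.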
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
        Test that no errors are raised when passed empty lists or paths that
        do not exist.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
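        # A few alternating do_handshake() calls are enough in practice for
        # the handshake to finish over the already-connected socket pair;
        # WantReadError just means that side is still waiting for the peer's
        # next flight.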
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
        We had a bug where the X509 cert instantiated in the callback wrapper
        was not run through __init__, so it was missing attributes needed when
        calling get_subject. This test sets up a handshake where we call
        get_subject on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert, we only need to give them icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
non-byte string single argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
        a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
        `Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
        callback passed to `Context.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
        Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
        Test that we can handle exceptions in the select callback. If
        select fails, it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
        Test that we can handle exceptions in the advertise callback. If
        advertise fails, no NPN protocol is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the advertise callback raises an exception it should be
        # raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
            When clients and servers cannot agree on what protocol to use next,
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
            # If the server's select callback doesn't return anything, the
            # connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
If ALPN is not in OpenSSL, we should raise NotImplementedError.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
`Connection.state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
        # signal a short write via its return value, it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent
        by the client or the server. Finished messages are sent during the
        TLS handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or the server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
`Connection.get_cipher_list` returns a list of `bytes` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
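        # Reporting a length of 2**31 lets test_buf_too_large exercise the
        # overflow check in Connection.send (SSL_write only takes an int for
        # the buffer length) without actually allocating two gigabytes.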
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with arguments other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
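        """
        When `MSG_PEEK` is passed as the flags argument,
        `Connection.recv_into` leaves the received bytes in place so that a
        subsequent call returns them again.
        """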
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
        `Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if passed a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
        `Connection.sendall` transmits all the bytes in the string passed to
        it, even if this requires multiple calls to an underlying write
        function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
    These are values defined by OpenSSL, intended only to be used as flags to
    OpenSSL APIs. The only assertions that can be made about them, it seems,
    concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
        the other; in this way they establish a connection and exchange
        application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `OpenSSL.SSL.Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
        A call to `Context.set_client_ca_list` after a call to
        `Context.add_client_ca` replaces the CA names specified by the earlier
        `add_client_ca` call with the names given to `set_client_ca_list`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
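    # A minimal sketch (an illustration, not an upstream test) of the stronger
    # check suggested in the docstring above: drive an in-memory handshake with
    # an info callback installed and confirm a handshake-done event is seen.
    # It reuses helpers and PEM data defined elsewhere in this module.
    def _example_info_event_check(self):  # pragma: no cover
        seen = []

        def info(conn, where, ret):
            seen.append(where)

        ctx = Context(TLSv1_METHOD)
        ctx.set_info_callback(info)
        ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
        ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
        server = Connection(ctx, None)
        client = Connection(Context(TLSv1_METHOD), None)
        handshake_in_memory(client, server)
        assert any(where & SSL_CB_HANDSHAKE_DONE for where in seen)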
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
# MASKED: _server_connection function (lines 3761-3775)
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
Both callbacks receive an opaque, user-provided piece of data in their
callbacks as the final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is raised.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
|
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
| 3,761 | 3,775 |
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
"""
Unit tests for :mod:`OpenSSL.SSL`.
"""
import datetime
import sys
import uuid
from gc import collect, get_referrers
from errno import (
EAFNOSUPPORT, ECONNREFUSED, EINPROGRESS, EWOULDBLOCK, EPIPE, ESHUTDOWN)
from sys import platform, getfilesystemencoding
from socket import AF_INET, AF_INET6, MSG_PEEK, SHUT_RDWR, error, socket
from os import makedirs
from os.path import join
from weakref import ref
from warnings import simplefilter
import pytest
from pretend import raiser
from six import PY3, text_type
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL.crypto import TYPE_RSA, FILETYPE_PEM
from OpenSSL.crypto import PKey, X509, X509Extension, X509Store
from OpenSSL.crypto import dump_privatekey, load_privatekey
from OpenSSL.crypto import dump_certificate, load_certificate
from OpenSSL.crypto import get_elliptic_curves
from OpenSSL.SSL import OPENSSL_VERSION_NUMBER, SSLEAY_VERSION, SSLEAY_CFLAGS
from OpenSSL.SSL import SSLEAY_PLATFORM, SSLEAY_DIR, SSLEAY_BUILT_ON
from OpenSSL.SSL import SENT_SHUTDOWN, RECEIVED_SHUTDOWN
from OpenSSL.SSL import (
SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD,
TLSv1_1_METHOD, TLSv1_2_METHOD)
from OpenSSL.SSL import OP_SINGLE_DH_USE, OP_NO_SSLv2, OP_NO_SSLv3
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE, VERIFY_NONE)
from OpenSSL import SSL
from OpenSSL.SSL import (
SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH,
SESS_CACHE_NO_AUTO_CLEAR, SESS_CACHE_NO_INTERNAL_LOOKUP,
SESS_CACHE_NO_INTERNAL_STORE, SESS_CACHE_NO_INTERNAL)
from OpenSSL.SSL import (
Error, SysCallError, WantReadError, WantWriteError, ZeroReturnError)
from OpenSSL.SSL import (
Context, Session, Connection, SSLeay_version)
from OpenSSL.SSL import _make_requires
from OpenSSL._util import ffi as _ffi, lib as _lib
from OpenSSL.SSL import (
OP_NO_QUERY_MTU, OP_COOKIE_EXCHANGE, OP_NO_TICKET, OP_NO_COMPRESSION,
MODE_RELEASE_BUFFERS)
from OpenSSL.SSL import (
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE)
try:
from OpenSSL.SSL import (
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
)
except ImportError:
SSL_ST_INIT = SSL_ST_BEFORE = SSL_ST_OK = SSL_ST_RENEGOTIATE = None
from .util import WARNING_TYPE_EXPECTED, NON_ASCII, is_consistent_type
from .test_crypto import (
cleartextCertificatePEM, cleartextPrivateKeyPEM,
client_cert_pem, client_key_pem, server_cert_pem, server_key_pem,
root_cert_pem)
# openssl dhparam 1024 -out dh-1024.pem (note that 1024 is a small number of
# bits to use)
dhparam = """\
-----BEGIN DH PARAMETERS-----
MIGHAoGBALdUMvn+C9MM+y5BWZs11mSeH6HHoEq0UVbzVq7UojC1hbsZUuGukQ3a
Qh2/pwqb18BZFykrWB0zv/OkLa0kx4cuUgNrUVq1EFheBiX6YqryJ7t2sO09NQiO
V7H54LmltOT/hEh6QWsJqb6BQgH65bswvV/XkYGja8/T0GzvbaVzAgEC
-----END DH PARAMETERS-----
"""
skip_if_py3 = pytest.mark.skipif(PY3, reason="Python 2 only")
def socket_any_family():
try:
return socket(AF_INET)
except error as e:
if e.errno == EAFNOSUPPORT:
return socket(AF_INET6)
raise
def loopback_address(socket):
if socket.family == AF_INET:
return "127.0.0.1"
else:
assert socket.family == AF_INET6
return "::1"
def join_bytes_or_unicode(prefix, suffix):
"""
Join two path components of either ``bytes`` or ``unicode``.
The return type is the same as the type of ``prefix``.
"""
# If the types are the same, nothing special is necessary.
if type(prefix) == type(suffix):
return join(prefix, suffix)
# Otherwise, coerce suffix to the type of prefix.
if isinstance(prefix, text_type):
return join(prefix, suffix.decode(getfilesystemencoding()))
else:
return join(prefix, suffix.encode(getfilesystemencoding()))
def verify_cb(conn, cert, errnum, depth, ok):
return ok
def socket_pair():
"""
Establish and return a pair of network sockets connected to each other.
"""
# Connect a pair of sockets
port = socket_any_family()
port.bind(('', 0))
port.listen(1)
client = socket(port.family)
client.setblocking(False)
client.connect_ex((loopback_address(port), port.getsockname()[1]))
client.setblocking(True)
server = port.accept()[0]
# Let's pass some unencrypted data to make sure our socket connection is
# fine. Just one byte, so we don't have to worry about buffers getting
# filled up or fragmentation.
server.send(b"x")
assert client.recv(1024) == b"x"
client.send(b"y")
assert server.recv(1024) == b"y"
# Most of our callers want non-blocking sockets, make it easy for them.
server.setblocking(False)
client.setblocking(False)
return (server, client)
def handshake(client, server):
conns = [client, server]
while conns:
for conn in conns:
try:
conn.do_handshake()
except WantReadError:
pass
else:
conns.remove(conn)
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b'basicConstraints', False, b'CA:true')
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 1024)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b"20000101000000Z")
cacert.set_notAfter(b"20200101000000Z")
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 1024)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b"20000101000000Z")
icert.set_notAfter(b"20200101000000Z")
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 1024)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b"20000101000000Z")
scert.set_notAfter(b"20200101000000Z")
scert.add_extensions([
X509Extension(b'basicConstraints', True, b'CA:false')])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
def loopback_client_factory(socket, version=SSLv23_METHOD):
client = Connection(Context(version), socket)
client.set_connect_state()
return client
def loopback_server_factory(socket, version=SSLv23_METHOD):
ctx = Context(version)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, socket)
server.set_accept_state()
return server
def loopback(server_factory=None, client_factory=None):
"""
Create a connected socket pair, wrap each end of it in an SSL
`Connection`, and perform a TLS handshake between the two connections.
"""
if server_factory is None:
server_factory = loopback_server_factory
if client_factory is None:
client_factory = loopback_client_factory
(server, client) = socket_pair()
server = server_factory(server)
client = client_factory(client)
handshake(client, server)
server.setblocking(True)
client.setblocking(True)
return server, client
def interact_in_memory(client_conn, server_conn):
"""
Try to read application bytes from each of the two `Connection` objects.
Copy bytes back and forth between their send/receive buffers for as long
as there is anything to copy. When there is nothing more to copy,
return `None`. If one of them actually manages to deliver some application
bytes, return a two-tuple of the connection from which the bytes were read
and the bytes themselves.
"""
wrote = True
while wrote:
# Loop until neither side has anything to say
wrote = False
# Copy stuff from each side's send buffer to the other side's
# receive buffer.
for (read, write) in [(client_conn, server_conn),
(server_conn, client_conn)]:
# Give the side a chance to generate some more bytes, or succeed.
try:
data = read.recv(2 ** 16)
except WantReadError:
# It didn't succeed, so we'll hope it generated some output.
pass
else:
# It did succeed, so we'll stop now and let the caller deal
# with it.
return (read, data)
while True:
# Keep copying as long as there's more stuff there.
try:
dirty = read.bio_read(4096)
except WantReadError:
# Okay, nothing more waiting to be sent. Stop
# processing this send buffer.
break
else:
# Keep track of the fact that someone generated some
# output.
wrote = True
write.bio_write(dirty)
def handshake_in_memory(client_conn, server_conn):
"""
Perform the TLS handshake between two `Connection` instances connected to
each other via memory BIOs.
"""
client_conn.set_connect_state()
server_conn.set_accept_state()
for conn in [client_conn, server_conn]:
try:
conn.do_handshake()
except WantReadError:
pass
interact_in_memory(client_conn, server_conn)
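# A small usage sketch (illustrative only, not part of the original suite)
# showing how the two helpers above are typically combined: handshake a pair
# of socket-less Connections, then move application data with
# interact_in_memory.
def _example_memory_bio_roundtrip():  # pragma: no cover
    server_ctx = Context(SSLv23_METHOD)
    server_ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
    server_ctx.use_certificate(
        load_certificate(FILETYPE_PEM, server_cert_pem))
    server = Connection(server_ctx, None)
    client = Connection(Context(SSLv23_METHOD), None)
    handshake_in_memory(client, server)
    client.send(b"ping")
    source, data = interact_in_memory(client, server)
    assert source is server and data == b"ping"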
class TestVersion(object):
"""
Tests for version information exposed by `OpenSSL.SSL.SSLeay_version` and
`OpenSSL.SSL.OPENSSL_VERSION_NUMBER`.
"""
def test_OPENSSL_VERSION_NUMBER(self):
"""
`OPENSSL_VERSION_NUMBER` is an integer with status in the low byte and
the patch, fix, minor, and major versions in the nibbles above that.
"""
assert isinstance(OPENSSL_VERSION_NUMBER, int)
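    # Purely illustrative (an assumption about the packing, not something the
    # test above asserts): pre-3.0 OpenSSL encodes the version as 0xMNNFFPPS,
    # so OpenSSL 1.0.2h is 0x1000208F.
    def _example_version_fields(self):  # pragma: no cover
        number = 0x1000208F
        assert (number >> 28) & 0xF == 1      # major
        assert (number >> 20) & 0xFF == 0x00  # minor
        assert (number >> 12) & 0xFF == 0x02  # fix
        assert (number >> 4) & 0xFF == 0x08   # patch ("h")
        assert number & 0xF == 0xF            # status nibble (release)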
def test_SSLeay_version(self):
"""
`SSLeay_version` takes a version type indicator and returns one of a
number of version strings based on that indicator.
"""
versions = {}
for t in [SSLEAY_VERSION, SSLEAY_CFLAGS, SSLEAY_BUILT_ON,
SSLEAY_PLATFORM, SSLEAY_DIR]:
version = SSLeay_version(t)
versions[version] = t
assert isinstance(version, bytes)
assert len(versions) == 5
@pytest.fixture
def ca_file(tmpdir):
"""
Create a valid PEM file with CA certificates and return the path.
"""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = key.public_key()
builder = x509.CertificateBuilder()
builder = builder.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
builder = builder.issuer_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"pyopenssl.org"),
]))
one_day = datetime.timedelta(1, 0, 0)
builder = builder.not_valid_before(datetime.datetime.today() - one_day)
builder = builder.not_valid_after(datetime.datetime.today() + one_day)
builder = builder.serial_number(int(uuid.uuid4()))
builder = builder.public_key(public_key)
builder = builder.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True,
)
certificate = builder.sign(
private_key=key, algorithm=hashes.SHA256(),
backend=default_backend()
)
ca_file = tmpdir.join("test.pem")
ca_file.write_binary(
certificate.public_bytes(
encoding=serialization.Encoding.PEM,
)
)
return str(ca_file).encode("ascii")
@pytest.fixture
def context():
"""
A simple TLS 1.0 context.
"""
return Context(TLSv1_METHOD)
class TestContext(object):
"""
Unit tests for `OpenSSL.SSL.Context`.
"""
@pytest.mark.parametrize("cipher_string", [
b"hello world:AES128-SHA",
u"hello world:AES128-SHA",
])
def test_set_cipher_list(self, context, cipher_string):
"""
`Context.set_cipher_list` accepts both byte and unicode strings
for naming the ciphers which connections created with the context
object will be able to choose from.
"""
context.set_cipher_list(cipher_string)
conn = Connection(context, None)
assert "AES128-SHA" in conn.get_cipher_list()
def test_set_cipher_list_wrong_type(self, context):
"""
`Context.set_cipher_list` raises `TypeError` when passed a non-string
argument.
"""
with pytest.raises(TypeError):
context.set_cipher_list(object())
def test_set_cipher_list_no_cipher_match(self, context):
"""
`Context.set_cipher_list` raises `OpenSSL.SSL.Error` with a
`"no cipher match"` reason string regardless of the TLS
version.
"""
with pytest.raises(Error) as excinfo:
context.set_cipher_list(b"imaginary-cipher")
assert excinfo.value.args == (
[
(
'SSL routines',
'SSL_CTX_set_cipher_list',
'no cipher match',
),
],
)
def test_load_client_ca(self, context, ca_file):
"""
`Context.load_client_ca` works as far as we can tell.
"""
context.load_client_ca(ca_file)
def test_load_client_ca_invalid(self, context, tmpdir):
"""
`Context.load_client_ca` raises an Error if the ca file is invalid.
"""
ca_file = tmpdir.join("test.pem")
ca_file.write("")
with pytest.raises(Error) as e:
context.load_client_ca(str(ca_file).encode("ascii"))
assert "PEM routines" == e.value.args[0][0][0]
def test_load_client_ca_unicode(self, context, ca_file):
"""
Passing the path as unicode raises a warning but works.
"""
pytest.deprecated_call(
context.load_client_ca, ca_file.decode("ascii")
)
def test_set_session_id(self, context):
"""
`Context.set_session_id` works as far as we can tell.
"""
context.set_session_id(b"abc")
def test_set_session_id_fail(self, context):
"""
`Context.set_session_id` errors are propagated.
"""
with pytest.raises(Error) as e:
context.set_session_id(b"abc" * 1000)
assert [
("SSL routines",
"SSL_CTX_set_session_id_context",
"ssl session id context too long")
] == e.value.args[0]
def test_set_session_id_unicode(self, context):
"""
`Context.set_session_id` raises a warning if a unicode string is
passed.
"""
pytest.deprecated_call(context.set_session_id, u"abc")
def test_method(self):
"""
`Context` can be instantiated with one of `SSLv2_METHOD`,
`SSLv3_METHOD`, `SSLv23_METHOD`, `TLSv1_METHOD`, `TLSv1_1_METHOD`,
or `TLSv1_2_METHOD`.
"""
methods = [SSLv23_METHOD, TLSv1_METHOD]
for meth in methods:
Context(meth)
maybe = [SSLv2_METHOD, SSLv3_METHOD, TLSv1_1_METHOD, TLSv1_2_METHOD]
for meth in maybe:
try:
Context(meth)
except (Error, ValueError):
# Some versions of OpenSSL have SSLv2 / TLSv1.1 / TLSv1.2, some
# don't. Difficult to say in advance.
pass
with pytest.raises(TypeError):
Context("")
with pytest.raises(ValueError):
Context(10)
def test_type(self):
"""
`Context` can be used to create instances of that type.
"""
assert is_consistent_type(Context, 'Context', TLSv1_METHOD)
def test_use_privatekey(self):
"""
`Context.use_privatekey` takes an `OpenSSL.crypto.PKey` instance.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(key)
with pytest.raises(TypeError):
ctx.use_privatekey("")
def test_use_privatekey_file_missing(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` when passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_privatekey_file(tmpfile)
def _use_privatekey_file_test(self, pemfile, filetype):
"""
Verify that calling ``Context.use_privatekey_file`` with the given
arguments does not raise an exception.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
with open(pemfile, "wt") as pem:
pem.write(
dump_privatekey(FILETYPE_PEM, key).decode("ascii")
)
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey_file(pemfile, filetype)
@pytest.mark.parametrize('filetype', [object(), "", None, 1.0])
def test_wrong_privatekey_file_wrong_args(self, tmpfile, filetype):
"""
`Context.use_privatekey_file` raises `TypeError` when called with
a `filetype` which is not a valid file encoding.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_privatekey_file(tmpfile, filetype)
def test_use_privatekey_file_bytes(self, tmpfile):
"""
A private key can be specified from a file by passing a ``bytes``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
FILETYPE_PEM,
)
def test_use_privatekey_file_unicode(self, tmpfile):
"""
A private key can be specified from a file by passing a ``unicode``
instance giving the file name to ``Context.use_privatekey_file``.
"""
self._use_privatekey_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
FILETYPE_PEM,
)
def test_use_certificate_wrong_args(self):
"""
`Context.use_certificate` raises `TypeError` when not passed
an `OpenSSL.crypto.X509` instance as its argument.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate("hello, world")
def test_use_certificate_uninitialized(self):
"""
`Context.use_certificate` raises `OpenSSL.SSL.Error` when passed a
`OpenSSL.crypto.X509` instance which has not been initialized
(i.e., which does not actually have any certificate data).
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate(X509())
def test_use_certificate(self):
"""
`Context.use_certificate` sets the certificate which will be
used to identify connections created using the context.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
ctx = Context(TLSv1_METHOD)
ctx.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM)
)
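    # A sketch of the stronger assertion described in the TODO above
    # (illustrative, not an upstream test): pair the certificate with its
    # matching key and let OpenSSL confirm the pairing via check_privatekey().
    def _example_use_certificate_with_check(self):  # pragma: no cover
        ctx = Context(TLSv1_METHOD)
        ctx.use_certificate(
            load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
        ctx.use_privatekey(
            load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
        # check_privatekey() returns None when the key and certificate agree
        # and raises Error otherwise.
        assert ctx.check_privatekey() is None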
def test_use_certificate_file_wrong_args(self):
"""
`Context.use_certificate_file` raises `TypeError` if the first
argument is not a byte string or the second argument is not an integer.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
with pytest.raises(TypeError):
ctx.use_certificate_file(b"somefile", object())
with pytest.raises(TypeError):
ctx.use_certificate_file(object(), FILETYPE_PEM)
def test_use_certificate_file_missing(self, tmpfile):
"""
`Context.use_certificate_file` raises `OpenSSL.SSL.Error` if passed
the name of a file which does not exist.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(Error):
ctx.use_certificate_file(tmpfile)
def _use_certificate_file_test(self, certificate_file):
"""
Verify that calling ``Context.use_certificate_file`` with the given
filename doesn't raise an exception.
"""
# TODO
# Hard to assert anything. But we could set a privatekey then ask
# OpenSSL if the cert and key agree using check_privatekey. Then as
# long as check_privatekey works right we're good...
with open(certificate_file, "wb") as pem_file:
pem_file.write(cleartextCertificatePEM)
ctx = Context(TLSv1_METHOD)
ctx.use_certificate_file(certificate_file)
def test_use_certificate_file_bytes(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`bytes` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._use_certificate_file_test(filename)
def test_use_certificate_file_unicode(self, tmpfile):
"""
`Context.use_certificate_file` sets the certificate (given as a
`unicode` filename) which will be used to identify connections created
using the context.
"""
filename = tmpfile.decode(getfilesystemencoding()) + NON_ASCII
self._use_certificate_file_test(filename)
def test_check_privatekey_valid(self):
"""
`Context.check_privatekey` returns `None` if the `Context` instance
has been configured to use a matched key and certificate pair.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, client_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
assert None is context.check_privatekey()
def test_check_privatekey_invalid(self):
"""
`Context.check_privatekey` raises `Error` if the `Context` instance
has been configured to use a key and certificate pair which don't
relate to each other.
"""
key = load_privatekey(FILETYPE_PEM, client_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
context = Context(TLSv1_METHOD)
context.use_privatekey(key)
context.use_certificate(cert)
with pytest.raises(Error):
context.check_privatekey()
def test_app_data(self):
"""
`Context.set_app_data` stores an object for later retrieval
using `Context.get_app_data`.
"""
app_data = object()
context = Context(TLSv1_METHOD)
context.set_app_data(app_data)
assert context.get_app_data() is app_data
def test_set_options_wrong_args(self):
"""
`Context.set_options` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_options(None)
def test_set_options(self):
"""
`Context.set_options` returns the new options value.
"""
context = Context(TLSv1_METHOD)
options = context.set_options(OP_NO_SSLv2)
assert options & OP_NO_SSLv2 == OP_NO_SSLv2
def test_set_mode_wrong_args(self):
"""
`Context.set_mode` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_mode(None)
def test_set_mode(self):
"""
`Context.set_mode` accepts a mode bitvector and returns the
newly set mode.
"""
context = Context(TLSv1_METHOD)
assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)
def test_set_timeout_wrong_args(self):
"""
`Context.set_timeout` raises `TypeError` if called with
a non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_timeout(None)
def test_timeout(self):
"""
`Context.set_timeout` sets the session timeout for all connections
created using the context object. `Context.get_timeout` retrieves
this value.
"""
context = Context(TLSv1_METHOD)
context.set_timeout(1234)
assert context.get_timeout() == 1234
def test_set_verify_depth_wrong_args(self):
"""
`Context.set_verify_depth` raises `TypeError` if called with a
non-`int` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify_depth(None)
def test_verify_depth(self):
"""
`Context.set_verify_depth` sets the number of certificates in
a chain to follow before giving up. The value can be retrieved with
`Context.get_verify_depth`.
"""
context = Context(TLSv1_METHOD)
context.set_verify_depth(11)
assert context.get_verify_depth() == 11
def _write_encrypted_pem(self, passphrase, tmpfile):
"""
Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file.
"""
key = PKey()
key.generate_key(TYPE_RSA, 512)
pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase)
with open(tmpfile, 'w') as fObj:
fObj.write(pem.decode('ascii'))
return tmpfile
def test_set_passwd_cb_wrong_args(self):
"""
`Context.set_passwd_cb` raises `TypeError` if called with a
non-callable first argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_passwd_cb(None)
def test_set_passwd_cb(self, tmpfile):
"""
`Context.set_passwd_cb` accepts a callable which will be invoked when
a private key is loaded from an encrypted PEM.
"""
passphrase = b"foobar"
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
calledWith = []
def passphraseCallback(maxlen, verify, extra):
calledWith.append((maxlen, verify, extra))
return passphrase
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
context.use_privatekey_file(pemFile)
assert len(calledWith) == 1
assert isinstance(calledWith[0][0], int)
assert isinstance(calledWith[0][1], int)
assert calledWith[0][2] is None
def test_passwd_callback_exception(self, tmpfile):
"""
`Context.use_privatekey_file` propagates any exception raised
by the passphrase callback.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
raise RuntimeError("Sorry, I am a fail.")
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(RuntimeError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_false(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a false value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return b""
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
with pytest.raises(Error):
context.use_privatekey_file(pemFile)
def test_passwd_callback_non_string(self, tmpfile):
"""
`Context.use_privatekey_file` raises `OpenSSL.SSL.Error` if the
passphrase callback returns a true non-string value.
"""
pemFile = self._write_encrypted_pem(b"monkeys are nice", tmpfile)
def passphraseCallback(maxlen, verify, extra):
return 10
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# TODO: Surely this is the wrong error?
with pytest.raises(ValueError):
context.use_privatekey_file(pemFile)
def test_passwd_callback_too_long(self, tmpfile):
"""
If the passphrase returned by the passphrase callback is longer than
the indicated maximum length, it is truncated.
"""
# A priori knowledge!
passphrase = b"x" * 1024
pemFile = self._write_encrypted_pem(passphrase, tmpfile)
def passphraseCallback(maxlen, verify, extra):
assert maxlen == 1024
return passphrase + b"y"
context = Context(TLSv1_METHOD)
context.set_passwd_cb(passphraseCallback)
# This shall succeed because the truncated result is the correct
# passphrase.
context.use_privatekey_file(pemFile)
def test_set_info_callback(self):
"""
`Context.set_info_callback` accepts a callable which will be
invoked when certain information about an SSL connection is available.
"""
(server, client) = socket_pair()
clientSSL = Connection(Context(TLSv1_METHOD), client)
clientSSL.set_connect_state()
called = []
def info(conn, where, ret):
called.append((conn, where, ret))
context = Context(TLSv1_METHOD)
context.set_info_callback(info)
context.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
context.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(context, server)
serverSSL.set_accept_state()
handshake(clientSSL, serverSSL)
# The callback must always be called with a Connection instance as the
# first argument. It would probably be better to split this into
# separate tests for client and server side info callbacks so we could
# assert it is called with the right Connection instance. It would
# also be good to assert *something* about `where` and `ret`.
notConnections = [
conn for (conn, where, ret) in called
if not isinstance(conn, Connection)]
assert [] == notConnections, (
"Some info callback arguments were not Connection instances.")
def _load_verify_locations_test(self, *args):
"""
Create a client context which will verify the peer certificate and call
its `load_verify_locations` method with the given arguments.
Then connect it to a server and ensure that the handshake succeeds.
"""
(server, client) = socket_pair()
clientContext = Context(TLSv1_METHOD)
clientContext.load_verify_locations(*args)
# Require that the server certificate verify properly or the
# connection will fail.
clientContext.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
clientSSL = Connection(clientContext, client)
clientSSL.set_connect_state()
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverSSL = Connection(serverContext, server)
serverSSL.set_accept_state()
# Without load_verify_locations above, the handshake
# will fail:
# Error: [('SSL routines', 'SSL3_GET_SERVER_CERTIFICATE',
# 'certificate verify failed')]
handshake(clientSSL, serverSSL)
cert = clientSSL.get_peer_certificate()
assert cert.get_subject().CN == 'Testing Root CA'
def _load_verify_cafile(self, cafile):
"""
Verify that if a path to a file containing a certificate is passed to
`Context.load_verify_locations` for the ``cafile`` parameter, that
certificate is used as a trust root for the purposes of verifying
connections created using that `Context`.
"""
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(cafile)
def test_load_verify_bytes_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
cafile = tmpfile + NON_ASCII.encode(getfilesystemencoding())
self._load_verify_cafile(cafile)
def test_load_verify_unicode_cafile(self, tmpfile):
"""
`Context.load_verify_locations` accepts a file name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_cafile(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_invalid_file(self, tmpfile):
"""
`Context.load_verify_locations` raises `Error` when passed a
non-existent cafile.
"""
clientContext = Context(TLSv1_METHOD)
with pytest.raises(Error):
clientContext.load_verify_locations(tmpfile)
def _load_verify_directory_locations_capath(self, capath):
"""
Verify that if a path to a directory containing certificate files is
passed to ``Context.load_verify_locations`` for the ``capath``
parameter, those certificates are used as trust roots for the purposes
of verifying connections created using that ``Context``.
"""
makedirs(capath)
# Hash values computed manually with c_rehash to avoid depending on
# c_rehash in the test suite. One is from OpenSSL 0.9.8, the other
# from OpenSSL 1.0.0.
for name in [b'c7adac82.0', b'c3705638.0']:
cafile = join_bytes_or_unicode(capath, name)
with open(cafile, 'w') as fObj:
fObj.write(cleartextCertificatePEM.decode('ascii'))
self._load_verify_locations_test(None, capath)
def test_load_verify_directory_bytes_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `bytes`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_load_verify_directory_unicode_capath(self, tmpfile):
"""
`Context.load_verify_locations` accepts a directory name as a `unicode`
instance and uses the certificates within for verification purposes.
"""
self._load_verify_directory_locations_capath(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_load_verify_locations_wrong_args(self):
"""
`Context.load_verify_locations` raises `TypeError` if called with non-`str`
arguments.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_verify_locations(object())
with pytest.raises(TypeError):
context.load_verify_locations(object(), object())
@pytest.mark.skipif(
not platform.startswith("linux"),
reason="Loading fallback paths is a linux-specific behavior to "
"accommodate pyca/cryptography manylinux1 wheels"
)
def test_fallback_default_verify_paths(self, monkeypatch):
"""
Test that we load certificates successfully on linux from the fallback
path. To do this we set the _CRYPTOGRAPHY_MANYLINUX1_CA_FILE and
_CRYPTOGRAPHY_MANYLINUX1_CA_DIR vars to be equal to whatever the
current OpenSSL default is and we disable
SSL_CTX_set_default_verify_paths so that it can't find certs unless
it loads via fallback.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_FILE",
_ffi.string(_lib.X509_get_default_cert_file())
)
monkeypatch.setattr(
SSL,
"_CRYPTOGRAPHY_MANYLINUX1_CA_DIR",
_ffi.string(_lib.X509_get_default_cert_dir())
)
context.set_default_verify_paths()
store = context.get_cert_store()
sk_obj = _lib.X509_STORE_get0_objects(store._store)
assert sk_obj != _ffi.NULL
num = _lib.sk_X509_OBJECT_num(sk_obj)
assert num != 0
def test_check_env_vars(self, monkeypatch):
"""
Test that we return True/False appropriately if the env vars are set.
"""
context = Context(TLSv1_METHOD)
dir_var = "CUSTOM_DIR_VAR"
file_var = "CUSTOM_FILE_VAR"
assert context._check_env_vars_set(dir_var, file_var) is False
monkeypatch.setenv(dir_var, "value")
monkeypatch.setenv(file_var, "value")
assert context._check_env_vars_set(dir_var, file_var) is True
assert context._check_env_vars_set(dir_var, file_var) is True
def test_verify_no_fallback_if_env_vars_set(self, monkeypatch):
"""
Test that we don't use the fallback path if env vars are set.
"""
context = Context(TLSv1_METHOD)
monkeypatch.setattr(
_lib, "SSL_CTX_set_default_verify_paths", lambda x: 1
)
dir_env_var = _ffi.string(
_lib.X509_get_default_cert_dir_env()
).decode("ascii")
file_env_var = _ffi.string(
_lib.X509_get_default_cert_file_env()
).decode("ascii")
monkeypatch.setenv(dir_env_var, "value")
monkeypatch.setenv(file_env_var, "value")
context.set_default_verify_paths()
monkeypatch.setattr(
context,
"_fallback_default_verify_paths",
raiser(SystemError)
)
context.set_default_verify_paths()
@pytest.mark.skipif(
platform == "win32",
reason="set_default_verify_paths appears not to work on Windows. "
"See LP#404343 and LP#404344."
)
def test_set_default_verify_paths(self):
"""
`Context.set_default_verify_paths` causes the platform-specific CA
certificate locations to be used for verification purposes.
"""
# Testing this requires a server with a certificate signed by one
# of the CAs in the platform CA location. Getting one of those
# costs money. Fortunately (or unfortunately, depending on your
# perspective), it's easy to think of a public server on the
# internet which has such a certificate. Connecting to the network
# in a unit test is bad, but it's the only way I can think of to
# really test this. -exarkun
context = Context(SSLv23_METHOD)
context.set_default_verify_paths()
context.set_verify(
VERIFY_PEER,
lambda conn, cert, errno, depth, preverify_ok: preverify_ok)
client = socket_any_family()
client.connect(("encrypted.google.com", 443))
clientSSL = Connection(context, client)
clientSSL.set_connect_state()
clientSSL.set_tlsext_host_name(b"encrypted.google.com")
clientSSL.do_handshake()
clientSSL.send(b"GET / HTTP/1.0\r\n\r\n")
assert clientSSL.recv(1024)
def test_fallback_path_is_not_file_or_dir(self):
"""
Test that when passed empty lists or paths that do not exist, no
errors are raised.
"""
context = Context(TLSv1_METHOD)
context._fallback_default_verify_paths([], [])
context._fallback_default_verify_paths(
["/not/a/file"], ["/not/a/dir"]
)
def test_add_extra_chain_cert_invalid_cert(self):
"""
`Context.add_extra_chain_cert` raises `TypeError` if called with an
object which is not an instance of `X509`.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.add_extra_chain_cert(object())
def _handshake_test(self, serverContext, clientContext):
"""
Verify that a client and server created with the given contexts can
successfully handshake and communicate.
"""
serverSocket, clientSocket = socket_pair()
server = Connection(serverContext, serverSocket)
server.set_accept_state()
client = Connection(clientContext, clientSocket)
client.set_connect_state()
# Make them talk to each other.
# interact_in_memory(client, server)
for _ in range(3):
for s in [client, server]:
try:
s.do_handshake()
except WantReadError:
pass
def test_set_verify_callback_connection_argument(self):
"""
The first argument passed to the verify callback is the
`Connection` instance for which verification is taking place.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
class VerifyCallback(object):
def callback(self, connection, *args):
self.connection = connection
return 1
verify = VerifyCallback()
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify.callback)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
assert verify.connection is clientConnection
def test_x509_in_verify_works(self):
"""
We had a bug where the X509 cert instantiated in the callback wrapper
never had `__init__` run, so it was missing attributes needed when calling
get_subject. This test sets up a handshake where we call get_subject
on the cert provided to the verify callback.
"""
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
serverConnection = Connection(serverContext, None)
def verify_cb_get_subject(conn, cert, errnum, depth, ok):
assert cert.get_subject()
return 1
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)
clientConnection = Connection(clientContext, None)
clientConnection.set_connect_state()
handshake_in_memory(clientConnection, serverConnection)
def test_set_verify_callback_exception(self):
"""
If the verify callback passed to `Context.set_verify` raises an
exception, verification fails and the exception is propagated to the
caller of `Connection.do_handshake`.
"""
serverContext = Context(TLSv1_2_METHOD)
serverContext.use_privatekey(
load_privatekey(FILETYPE_PEM, cleartextPrivateKeyPEM))
serverContext.use_certificate(
load_certificate(FILETYPE_PEM, cleartextCertificatePEM))
clientContext = Context(TLSv1_2_METHOD)
def verify_callback(*args):
raise Exception("silly verify failure")
clientContext.set_verify(VERIFY_PEER, verify_callback)
with pytest.raises(Exception) as exc:
self._handshake_test(serverContext, clientContext)
assert "silly verify failure" == str(exc.value)
def test_add_extra_chain_cert(self, tmpdir):
"""
`Context.add_extra_chain_cert` accepts an `X509`
instance to add to the certificate chain.
See `_create_certificate_chain` for the details of the
certificate chain tested.
The chain is tested by starting a server with scert and connecting
to it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
# Dump the CA certificate to a file because that's the only way to load
# it as a trusted CA in the client context.
for cert, name in [(cacert, 'ca.pem'),
(icert, 'i.pem'),
(scert, 's.pem')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_certificate(FILETYPE_PEM, cert).decode('ascii'))
for key, name in [(cakey, 'ca.key'),
(ikey, 'i.key'),
(skey, 's.key')]:
with tmpdir.join(name).open('w') as f:
f.write(dump_privatekey(FILETYPE_PEM, key).decode('ascii'))
# Create the server context
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
# The client already has cacert; we only need to give it icert.
serverContext.add_extra_chain_cert(icert)
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(str(tmpdir.join("ca.pem")))
# Try it out.
self._handshake_test(serverContext, clientContext)
def _use_certificate_chain_file_test(self, certdir):
"""
Verify that `Context.use_certificate_chain_file` reads a
certificate chain from a specified file.
The chain is tested by starting a server with scert and connecting to
it with a client which trusts cacert and requires verification to
succeed.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
makedirs(certdir)
chainFile = join_bytes_or_unicode(certdir, "chain.pem")
caFile = join_bytes_or_unicode(certdir, "ca.pem")
# Write out the chain file.
with open(chainFile, 'wb') as fObj:
# Most specific to least general.
fObj.write(dump_certificate(FILETYPE_PEM, scert))
fObj.write(dump_certificate(FILETYPE_PEM, icert))
fObj.write(dump_certificate(FILETYPE_PEM, cacert))
with open(caFile, 'w') as fObj:
fObj.write(dump_certificate(FILETYPE_PEM, cacert).decode('ascii'))
serverContext = Context(TLSv1_METHOD)
serverContext.use_certificate_chain_file(chainFile)
serverContext.use_privatekey(skey)
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT, verify_cb)
clientContext.load_verify_locations(caFile)
self._handshake_test(serverContext, clientContext)
def test_use_certificate_chain_file_bytes(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``bytes``) to specify additional certificates to use to
construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding())
)
def test_use_certificate_chain_file_unicode(self, tmpfile):
"""
``Context.use_certificate_chain_file`` accepts the name of a file (as
an instance of ``unicode``) to specify additional certificates to use
to construct and verify a trust chain.
"""
self._use_certificate_chain_file_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII
)
def test_use_certificate_chain_file_wrong_args(self):
"""
`Context.use_certificate_chain_file` raises `TypeError` if passed a
single argument which is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.use_certificate_chain_file(object())
def test_use_certificate_chain_file_missing_file(self, tmpfile):
"""
`Context.use_certificate_chain_file` raises `OpenSSL.SSL.Error` when
passed a bad chain file name (for example, the name of a file which
does not exist).
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.use_certificate_chain_file(tmpfile)
def test_set_verify_mode(self):
"""
`Context.get_verify_mode` returns the verify mode flags previously
passed to `Context.set_verify`.
"""
context = Context(TLSv1_METHOD)
assert context.get_verify_mode() == 0
context.set_verify(
VERIFY_PEER | VERIFY_CLIENT_ONCE, lambda *args: None)
assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)
@pytest.mark.parametrize('mode', [None, 1.0, object(), 'mode'])
def test_set_verify_wrong_mode_arg(self, mode):
"""
`Context.set_verify` raises `TypeError` if the first argument is
not an integer.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=mode, callback=lambda *args: None)
@pytest.mark.parametrize('callback', [None, 1.0, 'mode', ('foo', 'bar')])
def test_set_verify_wrong_callable_arg(self, callback):
"""
`Context.set_verify` raises `TypeError` if the second argument
is not callable.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_verify(mode=VERIFY_PEER, callback=callback)
def test_load_tmp_dh_wrong_args(self):
"""
`Context.load_tmp_dh` raises `TypeError` if called with a
non-`str` argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.load_tmp_dh(object())
def test_load_tmp_dh_missing_file(self):
"""
`Context.load_tmp_dh` raises `OpenSSL.SSL.Error` if the
specified file does not exist.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.load_tmp_dh(b"hello")
def _load_tmp_dh_test(self, dhfilename):
"""
Verify that calling ``Context.load_tmp_dh`` with the given filename
does not raise an exception.
"""
context = Context(TLSv1_METHOD)
with open(dhfilename, "w") as dhfile:
dhfile.write(dhparam)
context.load_tmp_dh(dhfilename)
# XXX What should I assert here? -exarkun
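    # One possible answer to the XXX above, sketched as an illustration rather
    # than an upstream test (it assumes a DHE cipher suite is available in the
    # build under test): after loading DH parameters, a server restricted to a
    # DHE suite can still complete an in-memory handshake.
    def _example_dhe_assertion(self, dhfilename):  # pragma: no cover
        server_ctx = Context(TLSv1_METHOD)
        server_ctx.use_privatekey(
            load_privatekey(FILETYPE_PEM, server_key_pem))
        server_ctx.use_certificate(
            load_certificate(FILETYPE_PEM, server_cert_pem))
        server_ctx.load_tmp_dh(dhfilename)
        server_ctx.set_cipher_list(b"DHE-RSA-AES128-SHA")
        server = Connection(server_ctx, None)
        client = Connection(Context(TLSv1_METHOD), None)
        handshake_in_memory(client, server)
        assert "DHE" in client.get_cipher_name()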
def test_load_tmp_dh_bytes(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``bytes``).
"""
self._load_tmp_dh_test(
tmpfile + NON_ASCII.encode(getfilesystemencoding()),
)
def test_load_tmp_dh_unicode(self, tmpfile):
"""
`Context.load_tmp_dh` loads Diffie-Hellman parameters from the
specified file (given as ``unicode``).
"""
self._load_tmp_dh_test(
tmpfile.decode(getfilesystemencoding()) + NON_ASCII,
)
def test_set_tmp_ecdh(self):
"""
`Context.set_tmp_ecdh` sets the elliptic curve for Diffie-Hellman to
the specified curve.
"""
context = Context(TLSv1_METHOD)
for curve in get_elliptic_curves():
if curve.name.startswith(u"Oakley-"):
# Setting Oakley-EC2N-4 and Oakley-EC2N-3 adds
# ('bignum routines', 'BN_mod_inverse', 'no inverse') to the
# error queue on OpenSSL 1.0.2.
continue
# The only easily "assertable" thing is that it does not raise an
# exception.
context.set_tmp_ecdh(curve)
def test_set_session_cache_mode_wrong_args(self):
"""
`Context.set_session_cache_mode` raises `TypeError` if called with
a non-integer argument.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_session_cache_mode(object())
def test_session_cache_mode(self):
"""
`Context.set_session_cache_mode` specifies how sessions are cached.
The setting can be retrieved via `Context.get_session_cache_mode`.
"""
context = Context(TLSv1_METHOD)
context.set_session_cache_mode(SESS_CACHE_OFF)
off = context.set_session_cache_mode(SESS_CACHE_BOTH)
assert SESS_CACHE_OFF == off
assert SESS_CACHE_BOTH == context.get_session_cache_mode()
def test_get_cert_store(self):
"""
`Context.get_cert_store` returns a `X509Store` instance.
"""
context = Context(TLSv1_METHOD)
store = context.get_cert_store()
assert isinstance(store, X509Store)
def test_set_tlsext_use_srtp_not_bytes(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises a TypeError if the list of profiles is not a byte string.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
context.set_tlsext_use_srtp(text_type('SRTP_AES128_CM_SHA1_80'))
def test_set_tlsext_use_srtp_invalid_profile(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It raises an Error if the call to OpenSSL fails.
"""
context = Context(TLSv1_METHOD)
with pytest.raises(Error):
context.set_tlsext_use_srtp(b'SRTP_BOGUS')
def test_set_tlsext_use_srtp_valid(self):
"""
`Context.set_tlsext_use_srtp` enables negotiating SRTP keying material.
It does not return anything.
"""
context = Context(TLSv1_METHOD)
assert context.set_tlsext_use_srtp(b'SRTP_AES128_CM_SHA1_80') is None
class TestServerNameCallback(object):
"""
Tests for `Context.set_tlsext_servername_callback` and its
interaction with `Connection`.
"""
def test_old_callback_forgotten(self):
"""
If `Context.set_tlsext_servername_callback` is used to specify
a new callback, the one it replaces is dereferenced.
"""
def callback(connection): # pragma: no cover
pass
def replacement(connection): # pragma: no cover
pass
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(callback)
tracker = ref(callback)
del callback
context.set_tlsext_servername_callback(replacement)
# One run of the garbage collector happens to work on CPython. PyPy
# doesn't collect the underlying object until a second run for whatever
# reason. That's fine, it still demonstrates our code has properly
# dropped the reference.
collect()
collect()
callback = tracker()
if callback is not None:
referrers = get_referrers(callback)
if len(referrers) > 1: # pragma: nocover
pytest.fail("Some references remain: %r" % (referrers,))
def test_no_servername(self):
"""
When a client specifies no server name, the callback passed to
`Context.set_tlsext_servername_callback` is invoked and the
result of `Connection.get_servername` is `None`.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Lose our reference to it. The Context is responsible for keeping it
# alive now.
del servername
collect()
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(server, client)
assert args == [(server, None)]
def test_servername(self):
"""
When a client specifies a server name in its hello message, the
callback passed to `Contexts.set_tlsext_servername_callback` is
invoked and the result of `Connection.get_servername` is that
server name.
"""
args = []
def servername(conn):
args.append((conn, conn.get_servername()))
context = Context(TLSv1_METHOD)
context.set_tlsext_servername_callback(servername)
# Necessary to actually accept the connection
context.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(context, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
client.set_tlsext_host_name(b"foo1.example.com")
interact_in_memory(server, client)
assert args == [(server, b"foo1.example.com")]
@pytest.mark.skipif(
not _lib.Cryptography_HAS_NEXTPROTONEG, reason="NPN is not available"
)
class TestNextProtoNegotiation(object):
"""
Test for Next Protocol Negotiation in PyOpenSSL.
"""
def test_npn_success(self):
"""
Tests that clients and servers that agree on the negotiated next
        protocol can correctly establish a connection, and that the agreed
protocol is reported by the connections.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
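        # In NPN the server advertises the protocols it supports and the
        # client chooses one, so the advertise callback is registered on the
        # server context and the select callback on the client context.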
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
assert server.get_next_proto_negotiated() == b'spdy/2'
assert client.get_next_proto_negotiated() == b'spdy/2'
def test_npn_client_fail(self):
"""
Tests that when clients and servers cannot agree on what protocol
        to use next, the TLS connection does not get established.
"""
advertise_args = []
select_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert advertise_args == [(server,)]
assert select_args == [(client, [b'http/1.1', b'spdy/2'])]
def test_npn_select_error(self):
"""
Test that we can handle exceptions in the select callback. If
select fails it should be fatal to the connection.
"""
advertise_args = []
def advertise(conn):
advertise_args.append((conn,))
return [b'http/1.1', b'spdy/2']
def select(conn, options):
raise TypeError
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the callback throws an exception it should be raised here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert advertise_args == [(server,), ]
def test_npn_advertise_error(self):
"""
Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client.
"""
select_args = []
def advertise(conn):
raise TypeError
def select(conn, options): # pragma: nocover
"""
Assert later that no args are actually appended.
"""
select_args.append((conn, options))
return b''
server_context = Context(TLSv1_METHOD)
server_context.set_npn_advertise_callback(advertise)
client_context = Context(TLSv1_METHOD)
client_context.set_npn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
        # If the advertise callback raises, the exception propagates here.
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == []
class TestApplicationLayerProtoNegotiation(object):
"""
Tests for ALPN in PyOpenSSL.
"""
# Skip tests on versions that don't support ALPN.
if _lib.Cryptography_HAS_ALPN:
def test_alpn_success(self):
"""
Clients and servers that agree on the negotiated ALPN protocol can
            correctly establish a connection, and the agreed protocol is reported
by the connections.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
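            # ALPN reverses the NPN roles: the client advertises its
            # protocols in the ClientHello and the select callback runs on
            # the server, which picks the protocol to use.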
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_set_on_connection(self):
"""
The same as test_alpn_success, but setting the ALPN protocols on
the connection rather than the context.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b'spdy/2'
# Setup the client context but don't set any ALPN protocols.
client_context = Context(TLSv1_METHOD)
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
# Set the ALPN protocols on the client connection.
client = Connection(client_context, None)
client.set_alpn_protos([b'http/1.1', b'spdy/2'])
client.set_connect_state()
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
assert server.get_alpn_proto_negotiated() == b'spdy/2'
assert client.get_alpn_proto_negotiated() == b'spdy/2'
def test_alpn_server_fail(self):
"""
            When clients and servers cannot agree on what protocol to use next,
the TLS connection does not get established.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
return b''
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# If the client doesn't return anything, the connection will fail.
with pytest.raises(Error):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
def test_alpn_no_server(self):
"""
When clients and servers cannot agree on what protocol to use next
because the server doesn't offer ALPN, no protocol is negotiated.
"""
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
# Do the dance.
interact_in_memory(server, client)
assert client.get_alpn_proto_negotiated() == b''
def test_alpn_callback_exception(self):
"""
We can handle exceptions in the ALPN select callback.
"""
select_args = []
def select(conn, options):
select_args.append((conn, options))
raise TypeError()
client_context = Context(TLSv1_METHOD)
client_context.set_alpn_protos([b'http/1.1', b'spdy/2'])
server_context = Context(TLSv1_METHOD)
server_context.set_alpn_select_callback(select)
# Necessary to actually accept the connection
server_context.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_context.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
# Do a little connection to trigger the logic
server = Connection(server_context, None)
server.set_accept_state()
client = Connection(client_context, None)
client.set_connect_state()
with pytest.raises(TypeError):
interact_in_memory(server, client)
assert select_args == [(server, [b'http/1.1', b'spdy/2'])]
else:
# No ALPN.
def test_alpn_not_implemented(self):
"""
            If ALPN is not supported by OpenSSL, `NotImplementedError` is raised.
"""
# Test the context methods first.
context = Context(TLSv1_METHOD)
with pytest.raises(NotImplementedError):
context.set_alpn_protos(None)
with pytest.raises(NotImplementedError):
context.set_alpn_select_callback(None)
# Now test a connection.
conn = Connection(context)
with pytest.raises(NotImplementedError):
conn.set_alpn_protos(None)
class TestSession(object):
"""
Unit tests for :py:obj:`OpenSSL.SSL.Session`.
"""
def test_construction(self):
"""
:py:class:`Session` can be constructed with no arguments, creating
a new instance of that type.
"""
new_session = Session()
assert isinstance(new_session, Session)
class TestConnection(object):
"""
Unit tests for `OpenSSL.SSL.Connection`.
"""
# XXX get_peer_certificate -> None
# XXX sock_shutdown
# XXX master_key -> TypeError
# XXX server_random -> TypeError
# XXX connect -> TypeError
# XXX connect_ex -> TypeError
# XXX set_connect_state -> TypeError
# XXX set_accept_state -> TypeError
# XXX do_handshake -> TypeError
# XXX bio_read -> TypeError
# XXX recv -> TypeError
# XXX send -> TypeError
# XXX bio_write -> TypeError
def test_type(self):
"""
`Connection` can be used to create instances of that type.
"""
ctx = Context(TLSv1_METHOD)
assert is_consistent_type(Connection, 'Connection', ctx, None)
@pytest.mark.parametrize('bad_context', [object(), 'context', None, 1])
def test_wrong_args(self, bad_context):
"""
`Connection.__init__` raises `TypeError` if called with a non-`Context`
instance argument.
"""
with pytest.raises(TypeError):
Connection(bad_context)
def test_get_context(self):
"""
`Connection.get_context` returns the `Context` instance used to
construct the `Connection` instance.
"""
context = Context(TLSv1_METHOD)
connection = Connection(context, None)
assert connection.get_context() is context
def test_set_context_wrong_args(self):
"""
`Connection.set_context` raises `TypeError` if called with a
non-`Context` instance argument.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_context(object())
with pytest.raises(TypeError):
connection.set_context("hello")
with pytest.raises(TypeError):
connection.set_context(1)
assert ctx is connection.get_context()
def test_set_context(self):
"""
`Connection.set_context` specifies a new `Context` instance to be
used for the connection.
"""
original = Context(SSLv23_METHOD)
replacement = Context(TLSv1_METHOD)
connection = Connection(original, None)
connection.set_context(replacement)
assert replacement is connection.get_context()
# Lose our references to the contexts, just in case the Connection
# isn't properly managing its own contributions to their reference
# counts.
del original, replacement
collect()
def test_set_tlsext_host_name_wrong_args(self):
"""
If `Connection.set_tlsext_host_name` is called with a non-byte string
argument or a byte string with an embedded NUL, `TypeError` is raised.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
conn.set_tlsext_host_name(object())
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"with\0null")
if PY3:
# On Python 3.x, don't accidentally implicitly convert from text.
with pytest.raises(TypeError):
conn.set_tlsext_host_name(b"example.com".decode("ascii"))
def test_pending(self):
"""
`Connection.pending` returns the number of bytes available for
immediate read.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.pending() == 0
def test_peek(self):
"""
`Connection.recv` peeks into the connection if `socket.MSG_PEEK` is
passed.
"""
server, client = loopback()
server.send(b'xy')
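        # MSG_PEEK leaves the data in the receive buffer, so repeated peeks
        # and the final plain recv all see the same bytes.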
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2, MSG_PEEK) == b'xy'
assert client.recv(2) == b'xy'
def test_connect_wrong_args(self):
"""
`Connection.connect` raises `TypeError` if called with a non-address
argument.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
with pytest.raises(TypeError):
connection.connect(None)
def test_connect_refused(self):
"""
`Connection.connect` raises `socket.error` if the underlying socket
connect method raises it.
"""
client = socket_any_family()
context = Context(TLSv1_METHOD)
clientSSL = Connection(context, client)
# pytest.raises here doesn't work because of a bug in py.test on Python
# 2.6: https://github.com/pytest-dev/pytest/issues/988
try:
clientSSL.connect((loopback_address(client), 1))
except error as e:
exc = e
assert exc.args[0] == ECONNREFUSED
def test_connect(self):
"""
`Connection.connect` establishes a connection to the specified address.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.connect((loopback_address(port), port.getsockname()[1]))
# XXX An assertion? Or something?
@pytest.mark.skipif(
platform == "darwin",
reason="connect_ex sometimes causes a kernel panic on OS X 10.6.4"
)
def test_connect_ex(self):
"""
If there is a connection error, `Connection.connect_ex` returns the
errno instead of raising an exception.
"""
port = socket_any_family()
port.bind(('', 0))
port.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
clientSSL.setblocking(False)
result = clientSSL.connect_ex(port.getsockname())
expected = (EINPROGRESS, EWOULDBLOCK)
assert result in expected
def test_accept(self):
"""
`Connection.accept` accepts a pending connection attempt and returns a
tuple of a new `Connection` (the accepted client) and the address the
connection originated from.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
port = socket_any_family()
portSSL = Connection(ctx, port)
portSSL.bind(('', 0))
portSSL.listen(3)
clientSSL = Connection(Context(TLSv1_METHOD), socket(port.family))
# Calling portSSL.getsockname() here to get the server IP address
# sounds great, but frequently fails on Windows.
clientSSL.connect((loopback_address(port), portSSL.getsockname()[1]))
serverSSL, address = portSSL.accept()
assert isinstance(serverSSL, Connection)
assert serverSSL.get_context() is ctx
assert address == clientSSL.getsockname()
def test_shutdown_wrong_args(self):
"""
`Connection.set_shutdown` raises `TypeError` if called with arguments
other than integers.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.set_shutdown(None)
def test_shutdown(self):
"""
`Connection.shutdown` performs an SSL-level connection shutdown.
"""
server, client = loopback()
assert not server.shutdown()
assert server.get_shutdown() == SENT_SHUTDOWN
with pytest.raises(ZeroReturnError):
client.recv(1024)
assert client.get_shutdown() == RECEIVED_SHUTDOWN
client.shutdown()
assert client.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
with pytest.raises(ZeroReturnError):
server.recv(1024)
assert server.get_shutdown() == (SENT_SHUTDOWN | RECEIVED_SHUTDOWN)
def test_shutdown_closed(self):
"""
If the underlying socket is closed, `Connection.shutdown` propagates
the write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as exc:
server.shutdown()
if platform == "win32":
assert exc.value.args[0] == ESHUTDOWN
else:
assert exc.value.args[0] == EPIPE
def test_shutdown_truncated(self):
"""
If the underlying connection is truncated, `Connection.shutdown`
raises an `Error`.
"""
server_ctx = Context(TLSv1_METHOD)
client_ctx = Context(TLSv1_METHOD)
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(server_ctx, None)
client = Connection(client_ctx, None)
handshake_in_memory(client, server)
assert not server.shutdown()
with pytest.raises(WantReadError):
server.shutdown()
server.bio_shutdown()
with pytest.raises(Error):
server.shutdown()
def test_set_shutdown(self):
"""
`Connection.set_shutdown` sets the state of the SSL connection
shutdown process.
"""
connection = Connection(Context(TLSv1_METHOD), socket_any_family())
connection.set_shutdown(RECEIVED_SHUTDOWN)
assert connection.get_shutdown() == RECEIVED_SHUTDOWN
def test_state_string(self):
"""
        `Connection.get_state_string` verbosely describes the current state of
the `Connection`.
"""
server, client = socket_pair()
server = loopback_server_factory(server)
client = loopback_client_factory(client)
assert server.get_state_string() in [
b"before/accept initialization", b"before SSL initialization"
]
assert client.get_state_string() in [
b"before/connect initialization", b"before SSL initialization"
]
def test_app_data(self):
"""
Any object can be set as app data by passing it to
`Connection.set_app_data` and later retrieved with
`Connection.get_app_data`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
assert None is conn.get_app_data()
app_data = object()
conn.set_app_data(app_data)
assert conn.get_app_data() is app_data
def test_makefile(self):
"""
`Connection.makefile` is not implemented and calling that
method raises `NotImplementedError`.
"""
conn = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(NotImplementedError):
conn.makefile()
def test_get_certificate(self):
"""
`Connection.get_certificate` returns the local certificate.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
context = Context(TLSv1_METHOD)
context.use_certificate(scert)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is not None
assert "Server Certificate" == cert.get_subject().CN
def test_get_certificate_none(self):
"""
`Connection.get_certificate` returns the local certificate.
If there is no certificate, it returns None.
"""
context = Context(TLSv1_METHOD)
client = Connection(context, None)
cert = client.get_certificate()
assert cert is None
def test_get_peer_cert_chain(self):
"""
`Connection.get_peer_cert_chain` returns a list of certificates
        which the connected server returned for certificate verification.
"""
chain = _create_certificate_chain()
[(cakey, cacert), (ikey, icert), (skey, scert)] = chain
serverContext = Context(TLSv1_METHOD)
serverContext.use_privatekey(skey)
serverContext.use_certificate(scert)
serverContext.add_extra_chain_cert(icert)
serverContext.add_extra_chain_cert(cacert)
server = Connection(serverContext, None)
server.set_accept_state()
# Create the client
clientContext = Context(TLSv1_METHOD)
clientContext.set_verify(VERIFY_NONE, verify_cb)
client = Connection(clientContext, None)
client.set_connect_state()
interact_in_memory(client, server)
chain = client.get_peer_cert_chain()
assert len(chain) == 3
assert "Server Certificate" == chain[0].get_subject().CN
assert "Intermediate Certificate" == chain[1].get_subject().CN
assert "Authority Certificate" == chain[2].get_subject().CN
def test_get_peer_cert_chain_none(self):
"""
`Connection.get_peer_cert_chain` returns `None` if the peer sends
no certificate chain.
"""
ctx = Context(TLSv1_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
server = Connection(ctx, None)
server.set_accept_state()
client = Connection(Context(TLSv1_METHOD), None)
client.set_connect_state()
interact_in_memory(client, server)
assert None is server.get_peer_cert_chain()
def test_get_session_unconnected(self):
"""
`Connection.get_session` returns `None` when used with an object
which has not been connected.
"""
ctx = Context(TLSv1_METHOD)
server = Connection(ctx, None)
session = server.get_session()
assert None is session
def test_server_get_session(self):
"""
On the server side of a connection, `Connection.get_session` returns a
`Session` instance representing the SSL session for that connection.
"""
server, client = loopback()
session = server.get_session()
assert isinstance(session, Session)
def test_client_get_session(self):
"""
On the client side of a connection, `Connection.get_session`
returns a `Session` instance representing the SSL session for
that connection.
"""
server, client = loopback()
session = client.get_session()
assert isinstance(session, Session)
def test_set_session_wrong_args(self):
"""
`Connection.set_session` raises `TypeError` if called with an object
that is not an instance of `Session`.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
with pytest.raises(TypeError):
connection.set_session(123)
with pytest.raises(TypeError):
connection.set_session("hello")
with pytest.raises(TypeError):
connection.set_session(object())
def test_client_set_session(self):
"""
`Connection.set_session`, when used prior to a connection being
established, accepts a `Session` instance and causes an attempt to
re-use the session it represents when the SSL handshake is performed.
"""
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(TLSv1_2_METHOD)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
originalServer, originalClient = loopback(
server_factory=makeServer)
originalSession = originalClient.get_session()
def makeClient(socket):
client = loopback_client_factory(socket)
client.set_session(originalSession)
return client
resumedServer, resumedClient = loopback(
server_factory=makeServer,
client_factory=makeClient)
# This is a proxy: in general, we have no access to any unique
# identifier for the session (new enough versions of OpenSSL expose
# a hash which could be usable, but "new enough" is very, very new).
# Instead, exploit the fact that the master key is re-used if the
# session is re-used. As long as the master key for the two
# connections is the same, the session was re-used!
assert originalServer.master_key() == resumedServer.master_key()
def test_set_session_wrong_method(self):
"""
If `Connection.set_session` is passed a `Session` instance associated
with a context using a different SSL method than the `Connection`
        is using, an `OpenSSL.SSL.Error` is raised.
"""
# Make this work on both OpenSSL 1.0.0, which doesn't support TLSv1.2
# and also on OpenSSL 1.1.0 which doesn't support SSLv3. (SSL_ST_INIT
# is a way to check for 1.1.0)
if SSL_ST_INIT is None:
v1 = TLSv1_2_METHOD
v2 = TLSv1_METHOD
elif hasattr(_lib, "SSLv3_method"):
v1 = TLSv1_METHOD
v2 = SSLv3_METHOD
else:
pytest.skip("Test requires either OpenSSL 1.1.0 or SSLv3")
key = load_privatekey(FILETYPE_PEM, server_key_pem)
cert = load_certificate(FILETYPE_PEM, server_cert_pem)
ctx = Context(v1)
ctx.use_privatekey(key)
ctx.use_certificate(cert)
ctx.set_session_id("unity-test")
def makeServer(socket):
server = Connection(ctx, socket)
server.set_accept_state()
return server
def makeOriginalClient(socket):
client = Connection(Context(v1), socket)
client.set_connect_state()
return client
originalServer, originalClient = loopback(
server_factory=makeServer, client_factory=makeOriginalClient)
originalSession = originalClient.get_session()
def makeClient(socket):
# Intentionally use a different, incompatible method here.
client = Connection(Context(v2), socket)
client.set_connect_state()
client.set_session(originalSession)
return client
with pytest.raises(Error):
loopback(client_factory=makeClient, server_factory=makeServer)
def test_wantWriteError(self):
"""
`Connection` methods which generate output raise
`OpenSSL.SSL.WantWriteError` if writing to the connection's BIO
        fails, indicating a should-write state.
"""
client_socket, server_socket = socket_pair()
# Fill up the client's send buffer so Connection won't be able to write
# anything. Only write a single byte at a time so we can be sure we
# completely fill the buffer. Even though the socket API is allowed to
# signal a short write via its return value it seems this doesn't
        # always happen on all platforms (FreeBSD and OS X in particular) for the
# very last bit of available buffer space.
msg = b"x"
for i in range(1024 * 1024 * 64):
try:
client_socket.send(msg)
except error as e:
if e.errno == EWOULDBLOCK:
break
raise
else:
pytest.fail(
"Failed to fill socket buffer, cannot test BIO want write")
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, client_socket)
        # Clients speak first, so make it an SSL client
conn.set_connect_state()
with pytest.raises(WantWriteError):
conn.do_handshake()
# XXX want_read
def test_get_finished_before_connect(self):
"""
`Connection.get_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_finished() is None
def test_get_peer_finished_before_connect(self):
"""
`Connection.get_peer_finished` returns `None` before TLS handshake
is completed.
"""
ctx = Context(TLSv1_METHOD)
connection = Connection(ctx, None)
assert connection.get_peer_finished() is None
def test_get_finished(self):
"""
        `Connection.get_finished` returns the TLS Finished message sent by
        the client or server. Finished messages are sent during the TLS
        handshake.
"""
server, client = loopback()
assert server.get_finished() is not None
assert len(server.get_finished()) > 0
def test_get_peer_finished(self):
"""
        `Connection.get_peer_finished` returns the TLS Finished message
        received from the client or server. Finished messages are sent
        during the TLS handshake.
"""
server, client = loopback()
assert server.get_peer_finished() is not None
assert len(server.get_peer_finished()) > 0
def test_tls_finished_message_symmetry(self):
"""
        The TLS Finished message sent by the server must be the TLS Finished
        message received by the client.
        The TLS Finished message sent by the client must be the TLS Finished
        message received by the server.
"""
server, client = loopback()
assert server.get_finished() == client.get_peer_finished()
assert client.get_finished() == server.get_peer_finished()
def test_get_cipher_name_before_connect(self):
"""
`Connection.get_cipher_name` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_name() is None
def test_get_cipher_name(self):
"""
`Connection.get_cipher_name` returns a `unicode` string giving the
name of the currently used cipher.
"""
server, client = loopback()
server_cipher_name, client_cipher_name = \
server.get_cipher_name(), client.get_cipher_name()
assert isinstance(server_cipher_name, text_type)
assert isinstance(client_cipher_name, text_type)
assert server_cipher_name == client_cipher_name
def test_get_cipher_version_before_connect(self):
"""
`Connection.get_cipher_version` returns `None` if no connection
has been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_version() is None
def test_get_cipher_version(self):
"""
`Connection.get_cipher_version` returns a `unicode` string giving
the protocol name of the currently used cipher.
"""
server, client = loopback()
server_cipher_version, client_cipher_version = \
server.get_cipher_version(), client.get_cipher_version()
assert isinstance(server_cipher_version, text_type)
assert isinstance(client_cipher_version, text_type)
assert server_cipher_version == client_cipher_version
def test_get_cipher_bits_before_connect(self):
"""
`Connection.get_cipher_bits` returns `None` if no connection has
been established.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
assert conn.get_cipher_bits() is None
def test_get_cipher_bits(self):
"""
`Connection.get_cipher_bits` returns the number of secret bits
of the currently used cipher.
"""
server, client = loopback()
server_cipher_bits, client_cipher_bits = \
server.get_cipher_bits(), client.get_cipher_bits()
assert isinstance(server_cipher_bits, int)
assert isinstance(client_cipher_bits, int)
assert server_cipher_bits == client_cipher_bits
def test_get_protocol_version_name(self):
"""
`Connection.get_protocol_version_name()` returns a string giving the
protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version_name = client.get_protocol_version_name()
server_protocol_version_name = server.get_protocol_version_name()
assert isinstance(server_protocol_version_name, text_type)
assert isinstance(client_protocol_version_name, text_type)
assert server_protocol_version_name == client_protocol_version_name
def test_get_protocol_version(self):
"""
`Connection.get_protocol_version()` returns an integer
giving the protocol version of the current connection.
"""
server, client = loopback()
client_protocol_version = client.get_protocol_version()
server_protocol_version = server.get_protocol_version()
assert isinstance(server_protocol_version, int)
assert isinstance(client_protocol_version, int)
assert server_protocol_version == client_protocol_version
def test_wantReadError(self):
"""
`Connection.bio_read` raises `OpenSSL.SSL.WantReadError` if there are
no bytes available to be read from the BIO.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(WantReadError):
conn.bio_read(1024)
@pytest.mark.parametrize('bufsize', [1.0, None, object(), 'bufsize'])
def test_bio_read_wrong_args(self, bufsize):
"""
`Connection.bio_read` raises `TypeError` if passed a non-integer
argument.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
with pytest.raises(TypeError):
conn.bio_read(bufsize)
def test_buffer_size(self):
"""
`Connection.bio_read` accepts an integer giving the maximum number
of bytes to read and return.
"""
ctx = Context(TLSv1_METHOD)
conn = Connection(ctx, None)
conn.set_connect_state()
try:
conn.do_handshake()
except WantReadError:
pass
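        # The handshake attempt wrote a ClientHello into the outgoing
        # memory BIO, so bio_read has bytes available to return.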
data = conn.bio_read(2)
assert 2 == len(data)
class TestConnectionGetCipherList(object):
"""
Tests for `Connection.get_cipher_list`.
"""
def test_result(self):
"""
        `Connection.get_cipher_list` returns a list of `str` giving the
names of the ciphers which might be used.
"""
connection = Connection(Context(TLSv1_METHOD), None)
ciphers = connection.get_cipher_list()
assert isinstance(ciphers, list)
for cipher in ciphers:
assert isinstance(cipher, str)
class VeryLarge(bytes):
"""
Mock object so that we don't have to allocate 2**31 bytes
"""
def __len__(self):
return 2**31
class TestConnectionSend(object):
"""
Tests for `Connection.send`.
"""
def test_wrong_args(self):
"""
        When called with arguments other than a string argument for its first
parameter, `Connection.send` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.send(object())
def test_short_bytes(self):
"""
When passed a short byte string, `Connection.send` transmits all of it
and returns the number of bytes sent.
"""
server, client = loopback()
count = server.send(b'xy')
assert count == 2
assert client.recv(2) == b'xy'
def test_text(self):
"""
        When passed a text string, `Connection.send` transmits all of it and
returns the number of bytes sent. It also raises a DeprecationWarning.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
count = server.send(b"xy".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert count == 2
assert client.recv(2) == b'xy'
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(memoryview(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@skip_if_py3
def test_short_buffer(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.send` transmits all of them and returns the number
of bytes sent.
"""
server, client = loopback()
count = server.send(buffer(b'xy'))
assert count == 2
assert client.recv(2) == b'xy'
@pytest.mark.skipif(
sys.maxsize < 2**31,
reason="sys.maxsize < 2**31 - test requires 64 bit"
)
def test_buf_too_large(self):
"""
When passed a buffer containing >= 2**31 bytes,
`Connection.send` bails out as SSL_write only
accepts an int for the buffer length.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(ValueError) as exc_info:
connection.send(VeryLarge())
exc_info.match(r"Cannot send more than .+ bytes at once")
def _make_memoryview(size):
"""
Create a new ``memoryview`` wrapped around a ``bytearray`` of the given
size.
"""
return memoryview(bytearray(size))
class TestConnectionRecvInto(object):
"""
Tests for `Connection.recv_into`.
"""
def _no_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`,
whatever bytes are available to be received that fit into that buffer
are written into that buffer.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'xy')
assert client.recv_into(output_buffer) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_bytearray_no_length(self):
"""
`Connection.recv_into` can be passed a `bytearray` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(bytearray)
def _respects_length_test(self, factory):
"""
Assert that when the given buffer is passed to `Connection.recv_into`
along with a value for `nbytes` that is less than the size of that
buffer, only `nbytes` bytes are written into the buffer.
"""
output_buffer = factory(10)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer, 5) == 5
assert output_buffer == bytearray(b'abcde\x00\x00\x00\x00\x00')
def test_bytearray_respects_length(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the `nbytes` parameter and doesn't copy in more than that
number of bytes.
"""
self._respects_length_test(bytearray)
def _doesnt_overfill_test(self, factory):
"""
Assert that if there are more bytes available to be read from the
receive buffer than would fit into the buffer passed to
`Connection.recv_into`, only as many as fit are written into it.
"""
output_buffer = factory(5)
server, client = loopback()
server.send(b'abcdefghij')
assert client.recv_into(output_buffer) == 5
assert output_buffer == bytearray(b'abcde')
rest = client.recv(5)
assert b'fghij' == rest
def test_bytearray_doesnt_overfill(self):
"""
When called with a `bytearray` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_bytearray_really_doesnt_overfill(self):
"""
When called with a `bytearray` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(bytearray)
def test_peek(self):
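        """
        `Connection.recv_into` peeks into the receive buffer without
        consuming the data when `MSG_PEEK` is passed as `flags`.
        """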
server, client = loopback()
server.send(b'xy')
for _ in range(2):
output_buffer = bytearray(5)
assert client.recv_into(output_buffer, flags=MSG_PEEK) == 2
assert output_buffer == bytearray(b'xy\x00\x00\x00')
def test_memoryview_no_length(self):
"""
`Connection.recv_into` can be passed a `memoryview` instance and data
in the receive buffer is written to it.
"""
self._no_length_test(_make_memoryview)
def test_memoryview_respects_length(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the ``nbytes`` parameter and doesn't copy more than that
number of bytes in.
"""
self._respects_length_test(_make_memoryview)
def test_memoryview_doesnt_overfill(self):
"""
When called with a `memoryview` instance, `Connection.recv_into`
respects the size of the array and doesn't write more bytes into it
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
def test_memoryview_really_doesnt_overfill(self):
"""
When called with a `memoryview` instance and an `nbytes` value that is
too large, `Connection.recv_into` respects the size of the array and
not the `nbytes` value and doesn't write more bytes into the buffer
than will fit.
"""
self._doesnt_overfill_test(_make_memoryview)
class TestConnectionSendall(object):
"""
Tests for `Connection.sendall`.
"""
def test_wrong_args(self):
"""
When called with arguments other than a string argument for its first
parameter, `Connection.sendall` raises `TypeError`.
"""
connection = Connection(Context(TLSv1_METHOD), None)
with pytest.raises(TypeError):
connection.sendall(object())
def test_short(self):
"""
`Connection.sendall` transmits all of the bytes in the string
passed to it.
"""
server, client = loopback()
server.sendall(b'x')
assert client.recv(1) == b'x'
def test_text(self):
"""
`Connection.sendall` transmits all the content in the string passed
        to it, raising a DeprecationWarning if the argument is a text string.
"""
server, client = loopback()
with pytest.warns(DeprecationWarning) as w:
simplefilter("always")
server.sendall(b"x".decode("ascii"))
assert (
"{0} for buf is no longer accepted, use bytes".format(
WARNING_TYPE_EXPECTED
) == str(w[-1].message))
assert client.recv(1) == b"x"
def test_short_memoryview(self):
"""
When passed a memoryview onto a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(memoryview(b'x'))
assert client.recv(1) == b'x'
@skip_if_py3
def test_short_buffers(self):
"""
When passed a buffer containing a small number of bytes,
`Connection.sendall` transmits all of them.
"""
server, client = loopback()
server.sendall(buffer(b'x'))
assert client.recv(1) == b'x'
def test_long(self):
"""
`Connection.sendall` transmits all the bytes in the string passed to it
        even if this requires multiple calls to an underlying write function.
"""
server, client = loopback()
# Should be enough, underlying SSL_write should only do 16k at a time.
# On Windows, after 32k of bytes the write will block (forever
# - because no one is yet reading).
message = b'x' * (1024 * 32 - 1) + b'y'
server.sendall(message)
accum = []
received = 0
while received < len(message):
data = client.recv(1024)
accum.append(data)
received += len(data)
assert message == b''.join(accum)
def test_closed(self):
"""
If the underlying socket is closed, `Connection.sendall` propagates the
write error from the low level write call.
"""
server, client = loopback()
server.sock_shutdown(2)
with pytest.raises(SysCallError) as err:
server.sendall(b"hello, world")
if platform == "win32":
assert err.value.args[0] == ESHUTDOWN
else:
assert err.value.args[0] == EPIPE
class TestConnectionRenegotiate(object):
"""
Tests for SSL renegotiation APIs.
"""
def test_total_renegotiations(self):
"""
`Connection.total_renegotiations` returns `0` before any renegotiations
have happened.
"""
connection = Connection(Context(TLSv1_METHOD), None)
assert connection.total_renegotiations() == 0
def test_renegotiate(self):
"""
Go through a complete renegotiation cycle.
"""
server, client = loopback(
lambda s: loopback_server_factory(s, TLSv1_2_METHOD),
lambda s: loopback_client_factory(s, TLSv1_2_METHOD),
)
server.send(b"hello world")
assert b"hello world" == client.recv(len(b"hello world"))
assert 0 == server.total_renegotiations()
assert False is server.renegotiate_pending()
assert True is server.renegotiate()
assert True is server.renegotiate_pending()
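        # renegotiate() only schedules the renegotiation; the do_handshake()
        # calls below are what actually drive it to completion.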
server.setblocking(False)
client.setblocking(False)
client.do_handshake()
server.do_handshake()
assert 1 == server.total_renegotiations()
while False is server.renegotiate_pending():
pass
class TestError(object):
"""
Unit tests for `OpenSSL.SSL.Error`.
"""
def test_type(self):
"""
`Error` is an exception type.
"""
assert issubclass(Error, Exception)
assert Error.__name__ == 'Error'
class TestConstants(object):
"""
Tests for the values of constants exposed in `OpenSSL.SSL`.
These are values defined by OpenSSL intended only to be used as flags to
    OpenSSL APIs. The only assertions that can reasonably be made about
    them concern their values.
"""
@pytest.mark.skipif(
OP_NO_QUERY_MTU is None,
reason="OP_NO_QUERY_MTU unavailable - OpenSSL version may be too old"
)
def test_op_no_query_mtu(self):
"""
The value of `OpenSSL.SSL.OP_NO_QUERY_MTU` is 0x1000, the value
of `SSL_OP_NO_QUERY_MTU` defined by `openssl/ssl.h`.
"""
assert OP_NO_QUERY_MTU == 0x1000
@pytest.mark.skipif(
OP_COOKIE_EXCHANGE is None,
reason="OP_COOKIE_EXCHANGE unavailable - "
"OpenSSL version may be too old"
)
def test_op_cookie_exchange(self):
"""
The value of `OpenSSL.SSL.OP_COOKIE_EXCHANGE` is 0x2000, the
value of `SSL_OP_COOKIE_EXCHANGE` defined by `openssl/ssl.h`.
"""
assert OP_COOKIE_EXCHANGE == 0x2000
@pytest.mark.skipif(
OP_NO_TICKET is None,
reason="OP_NO_TICKET unavailable - OpenSSL version may be too old"
)
def test_op_no_ticket(self):
"""
The value of `OpenSSL.SSL.OP_NO_TICKET` is 0x4000, the value of
`SSL_OP_NO_TICKET` defined by `openssl/ssl.h`.
"""
assert OP_NO_TICKET == 0x4000
@pytest.mark.skipif(
OP_NO_COMPRESSION is None,
reason="OP_NO_COMPRESSION unavailable - OpenSSL version may be too old"
)
def test_op_no_compression(self):
"""
The value of `OpenSSL.SSL.OP_NO_COMPRESSION` is 0x20000, the
value of `SSL_OP_NO_COMPRESSION` defined by `openssl/ssl.h`.
"""
assert OP_NO_COMPRESSION == 0x20000
def test_sess_cache_off(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_OFF` is 0x0, the value of
`SSL_SESS_CACHE_OFF` defined by `openssl/ssl.h`.
"""
assert 0x0 == SESS_CACHE_OFF
def test_sess_cache_client(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_CLIENT` is 0x1, the value of
`SSL_SESS_CACHE_CLIENT` defined by `openssl/ssl.h`.
"""
assert 0x1 == SESS_CACHE_CLIENT
def test_sess_cache_server(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_SERVER` is 0x2, the value of
`SSL_SESS_CACHE_SERVER` defined by `openssl/ssl.h`.
"""
assert 0x2 == SESS_CACHE_SERVER
def test_sess_cache_both(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_BOTH` is 0x3, the value of
`SSL_SESS_CACHE_BOTH` defined by `openssl/ssl.h`.
"""
assert 0x3 == SESS_CACHE_BOTH
def test_sess_cache_no_auto_clear(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_AUTO_CLEAR` is 0x80, the
value of `SSL_SESS_CACHE_NO_AUTO_CLEAR` defined by
`openssl/ssl.h`.
"""
assert 0x80 == SESS_CACHE_NO_AUTO_CLEAR
def test_sess_cache_no_internal_lookup(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_LOOKUP` is 0x100,
the value of `SSL_SESS_CACHE_NO_INTERNAL_LOOKUP` defined by
`openssl/ssl.h`.
"""
assert 0x100 == SESS_CACHE_NO_INTERNAL_LOOKUP
def test_sess_cache_no_internal_store(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL_STORE` is 0x200,
the value of `SSL_SESS_CACHE_NO_INTERNAL_STORE` defined by
`openssl/ssl.h`.
"""
assert 0x200 == SESS_CACHE_NO_INTERNAL_STORE
def test_sess_cache_no_internal(self):
"""
        The value of `OpenSSL.SSL.SESS_CACHE_NO_INTERNAL` is 0x300, the
value of `SSL_SESS_CACHE_NO_INTERNAL` defined by
`openssl/ssl.h`.
"""
assert 0x300 == SESS_CACHE_NO_INTERNAL
class TestMemoryBIO(object):
"""
Tests for `OpenSSL.SSL.Connection` using a memory BIO.
"""
def _server(self, sock):
"""
Create a new server-side SSL `Connection` object wrapped around `sock`.
"""
# Create the server side Connection. This is mostly setup boilerplate
# - use TLSv1, use a particular certificate, etc.
server_ctx = Context(TLSv1_METHOD)
server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
server_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
server_store = server_ctx.get_cert_store()
server_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, server_key_pem))
server_ctx.use_certificate(
load_certificate(FILETYPE_PEM, server_cert_pem))
server_ctx.check_privatekey()
server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
# Here the Connection is actually created. If None is passed as the
# 2nd parameter, it indicates a memory BIO should be created.
server_conn = Connection(server_ctx, sock)
server_conn.set_accept_state()
return server_conn
def _client(self, sock):
"""
Create a new client-side SSL `Connection` object wrapped around `sock`.
"""
# Now create the client side Connection. Similar boilerplate to the
# above.
client_ctx = Context(TLSv1_METHOD)
client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)
client_ctx.set_verify(
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,
verify_cb
)
client_store = client_ctx.get_cert_store()
client_ctx.use_privatekey(
load_privatekey(FILETYPE_PEM, client_key_pem))
client_ctx.use_certificate(
load_certificate(FILETYPE_PEM, client_cert_pem))
client_ctx.check_privatekey()
client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))
client_conn = Connection(client_ctx, sock)
client_conn.set_connect_state()
return client_conn
def test_memory_connect(self):
"""
Two `Connection`s which use memory BIOs can be manually connected by
reading from the output of each and writing those bytes to the input of
the other and in this way establish a connection and exchange
application-level bytes with each other.
"""
server_conn = self._server(None)
client_conn = self._client(None)
# There should be no key or nonces yet.
assert server_conn.master_key() is None
assert server_conn.client_random() is None
assert server_conn.server_random() is None
# First, the handshake needs to happen. We'll deliver bytes back and
# forth between the client and server until neither of them feels like
# speaking any more.
assert interact_in_memory(client_conn, server_conn) is None
# Now that the handshake is done, there should be a key and nonces.
assert server_conn.master_key() is not None
assert server_conn.client_random() is not None
assert server_conn.server_random() is not None
assert server_conn.client_random() == client_conn.client_random()
assert server_conn.server_random() == client_conn.server_random()
assert server_conn.client_random() != server_conn.server_random()
assert client_conn.client_random() != client_conn.server_random()
# Export key material for other uses.
cekm = client_conn.export_keying_material(b'LABEL', 32)
sekm = server_conn.export_keying_material(b'LABEL', 32)
assert cekm is not None
assert sekm is not None
assert cekm == sekm
assert len(sekm) == 32
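        # export_keying_material implements TLS keying material export
        # (RFC 5705): the same label, length, and optional context must
        # produce identical bytes on both ends of the connection.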
# Export key material for other uses with additional context.
cekmc = client_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
sekmc = server_conn.export_keying_material(b'LABEL', 32, b'CONTEXT')
assert cekmc is not None
assert sekmc is not None
assert cekmc == sekmc
assert cekmc != cekm
assert sekmc != sekm
# Export with alternate label
cekmt = client_conn.export_keying_material(b'test', 32, b'CONTEXT')
sekmt = server_conn.export_keying_material(b'test', 32, b'CONTEXT')
assert cekmc != cekmt
assert sekmc != sekmt
# Here are the bytes we'll try to send.
important_message = b'One if by land, two if by sea.'
server_conn.write(important_message)
assert (
interact_in_memory(client_conn, server_conn) ==
(client_conn, important_message))
client_conn.write(important_message[::-1])
assert (
interact_in_memory(client_conn, server_conn) ==
(server_conn, important_message[::-1]))
def test_socket_connect(self):
"""
Just like `test_memory_connect` but with an actual socket.
This is primarily to rule out the memory BIO code as the source of any
problems encountered while passing data over a `Connection` (if
this test fails, there must be a problem outside the memory BIO code,
as no memory BIO is involved here). Even though this isn't a memory
BIO test, it's convenient to have it here.
"""
server_conn, client_conn = loopback()
important_message = b"Help me Obi Wan Kenobi, you're my only hope."
client_conn.send(important_message)
msg = server_conn.recv(1024)
assert msg == important_message
# Again in the other direction, just for fun.
important_message = important_message[::-1]
server_conn.send(important_message)
msg = client_conn.recv(1024)
assert msg == important_message
def test_socket_overrides_memory(self):
"""
        Test that `Connection.bio_read` and `Connection.bio_write` don't
        work on `Connection` instances that use sockets.
"""
context = Context(TLSv1_METHOD)
client = socket_any_family()
clientSSL = Connection(context, client)
with pytest.raises(TypeError):
clientSSL.bio_read(100)
with pytest.raises(TypeError):
clientSSL.bio_write("foo")
with pytest.raises(TypeError):
clientSSL.bio_shutdown()
def test_outgoing_overflow(self):
"""
If more bytes than can be written to the memory BIO are passed to
`Connection.send` at once, the number of bytes which were written is
returned and that many bytes from the beginning of the input can be
read from the other end of the connection.
"""
server = self._server(None)
client = self._client(None)
interact_in_memory(client, server)
size = 2 ** 15
sent = client.send(b"x" * size)
# Sanity check. We're trying to test what happens when the entire
# input can't be sent. If the entire input was sent, this test is
# meaningless.
assert sent < size
receiver, received = interact_in_memory(client, server)
assert receiver is server
# We can rely on all of these bytes being received at once because
# loopback passes 2 ** 16 to recv - more than 2 ** 15.
assert len(received) == sent
def test_shutdown(self):
"""
`Connection.bio_shutdown` signals the end of the data stream
from which the `Connection` reads.
"""
server = self._server(None)
server.bio_shutdown()
with pytest.raises(Error) as err:
server.recv(1024)
# We don't want WantReadError or ZeroReturnError or anything - it's a
# handshake failure.
assert type(err.value) in [Error, SysCallError]
def test_unexpected_EOF(self):
"""
If the connection is lost before an orderly SSL shutdown occurs,
`OpenSSL.SSL.SysCallError` is raised with a message of
"Unexpected EOF".
"""
server_conn, client_conn = loopback()
client_conn.sock_shutdown(SHUT_RDWR)
with pytest.raises(SysCallError) as err:
server_conn.recv(1024)
assert err.value.args == (-1, "Unexpected EOF")
def _check_client_ca_list(self, func):
"""
Verify the return value of the `get_client_ca_list` method for
server and client connections.
:param func: A function which will be called with the server context
before the client and server are connected to each other. This
function should specify a list of CAs for the server to send to the
client and return that same list. The list will be used to verify
that `get_client_ca_list` returns the proper value at
various times.
"""
server = self._server(None)
client = self._client(None)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == []
ctx = server.get_context()
expected = func(ctx)
assert client.get_client_ca_list() == []
assert server.get_client_ca_list() == expected
interact_in_memory(client, server)
assert client.get_client_ca_list() == expected
assert server.get_client_ca_list() == expected
def test_set_client_ca_list_errors(self):
"""
`Context.set_client_ca_list` raises a `TypeError` if called with a
non-list or a list that contains objects other than X509Names.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.set_client_ca_list("spam")
with pytest.raises(TypeError):
ctx.set_client_ca_list(["spam"])
def test_set_empty_ca_list(self):
"""
If passed an empty list, `Context.set_client_ca_list` configures the
context to send no CA names to the client and, on both the server and
client sides, `Connection.get_client_ca_list` returns an empty list
after the connection is set up.
"""
def no_ca(ctx):
ctx.set_client_ca_list([])
return []
self._check_client_ca_list(no_ca)
def test_set_one_ca_list(self):
"""
If passed a list containing a single X509Name,
`Context.set_client_ca_list` configures the context to send
that CA name to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing that
X509Name after the connection is set up.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(single_ca)
def test_set_multiple_ca_list(self):
"""
If passed a list containing multiple X509Name objects,
`Context.set_client_ca_list` configures the context to send
those CA names to the client and, on both the server and client sides,
`Connection.get_client_ca_list` returns a list containing those
X509Names after the connection is set up.
"""
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def multiple_ca(ctx):
L = [sedesc, cldesc]
ctx.set_client_ca_list(L)
return L
self._check_client_ca_list(multiple_ca)
def test_reset_ca_list(self):
"""
If called multiple times, only the X509Names passed to the final call
of `Context.set_client_ca_list` are used to configure the CA
names sent to the client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def changed_ca(ctx):
ctx.set_client_ca_list([sedesc, cldesc])
ctx.set_client_ca_list([cadesc])
return [cadesc]
self._check_client_ca_list(changed_ca)
def test_mutated_ca_list(self):
"""
If the list passed to `Context.set_client_ca_list` is mutated
afterwards, this does not affect the list of CA names sent to the
client.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def mutated_ca(ctx):
L = [cadesc]
ctx.set_client_ca_list([cadesc])
L.append(sedesc)
return [cadesc]
self._check_client_ca_list(mutated_ca)
def test_add_client_ca_wrong_args(self):
"""
`Context.add_client_ca` raises `TypeError` if called with
a non-X509 object.
"""
ctx = Context(TLSv1_METHOD)
with pytest.raises(TypeError):
ctx.add_client_ca("spam")
def test_one_add_client_ca(self):
"""
A certificate's subject can be added as a CA to be sent to the client
with `Context.add_client_ca`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
cadesc = cacert.get_subject()
def single_ca(ctx):
ctx.add_client_ca(cacert)
return [cadesc]
self._check_client_ca_list(single_ca)
def test_multiple_add_client_ca(self):
"""
Multiple CA names can be sent to the client by calling
`Context.add_client_ca` with multiple X509 objects.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def multiple_ca(ctx):
ctx.add_client_ca(cacert)
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(multiple_ca)
def test_set_and_add_client_ca(self):
"""
A call to `Context.set_client_ca_list` followed by a call to
`Context.add_client_ca` results in using the CA names from the
first call and the CA name from the second call.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
cldesc = clcert.get_subject()
def mixed_set_add_ca(ctx):
ctx.set_client_ca_list([cadesc, sedesc])
ctx.add_client_ca(clcert)
return [cadesc, sedesc, cldesc]
self._check_client_ca_list(mixed_set_add_ca)
def test_set_after_add_client_ca(self):
"""
        A call to `Context.set_client_ca_list` after a call to
        `Context.add_client_ca` replaces the CA names added by the earlier
        `add_client_ca` call with the names passed to `set_client_ca_list`.
"""
cacert = load_certificate(FILETYPE_PEM, root_cert_pem)
secert = load_certificate(FILETYPE_PEM, server_cert_pem)
clcert = load_certificate(FILETYPE_PEM, server_cert_pem)
cadesc = cacert.get_subject()
sedesc = secert.get_subject()
def set_replaces_add_ca(ctx):
ctx.add_client_ca(clcert)
ctx.set_client_ca_list([cadesc])
ctx.add_client_ca(secert)
return [cadesc, sedesc]
self._check_client_ca_list(set_replaces_add_ca)
class TestInfoConstants(object):
"""
Tests for assorted constants exposed for use in info callbacks.
"""
def test_integers(self):
"""
All of the info constants are integers.
This is a very weak test. It would be nice to have one that actually
verifies that as certain info events happen, the value passed to the
info callback matches up with the constant exposed by OpenSSL.SSL.
"""
for const in [
SSL_ST_CONNECT, SSL_ST_ACCEPT, SSL_ST_MASK,
SSL_CB_LOOP, SSL_CB_EXIT, SSL_CB_READ, SSL_CB_WRITE, SSL_CB_ALERT,
SSL_CB_READ_ALERT, SSL_CB_WRITE_ALERT, SSL_CB_ACCEPT_LOOP,
SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP, SSL_CB_CONNECT_EXIT,
SSL_CB_HANDSHAKE_START, SSL_CB_HANDSHAKE_DONE
]:
assert isinstance(const, int)
# These constants don't exist on OpenSSL 1.1.0
for const in [
SSL_ST_INIT, SSL_ST_BEFORE, SSL_ST_OK, SSL_ST_RENEGOTIATE
]:
assert const is None or isinstance(const, int)
class TestRequires(object):
"""
Tests for the decorator factory used to conditionally raise
NotImplementedError when older OpenSSLs are used.
"""
def test_available(self):
"""
When the OpenSSL functionality is available the decorated functions
work appropriately.
"""
feature_guard = _make_requires(True, "Error text")
results = []
@feature_guard
def inner():
results.append(True)
return True
assert inner() is True
assert [True] == results
def test_unavailable(self):
"""
When the OpenSSL functionality is not available the decorated function
does not execute and NotImplementedError is raised.
"""
feature_guard = _make_requires(False, "Error text")
@feature_guard
def inner(): # pragma: nocover
pytest.fail("Should not be called")
with pytest.raises(NotImplementedError) as e:
inner()
assert "Error text" in str(e.value)
class TestOCSP(object):
"""
Tests for PyOpenSSL's OCSP stapling support.
"""
sample_ocsp_data = b"this is totally ocsp data"
def _client_connection(self, callback, data, request_ocsp=True):
"""
Builds a client connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
:param request_ocsp: Whether the client will actually ask for OCSP
stapling. Useful for testing only.
"""
ctx = Context(SSLv23_METHOD)
ctx.set_ocsp_client_callback(callback, data)
client = Connection(ctx)
if request_ocsp:
client.request_ocsp()
client.set_connect_state()
return client
def _server_connection(self, callback, data):
"""
Builds a server connection suitable for using OCSP.
:param callback: The callback to register for OCSP.
:param data: The opaque data object that will be handed to the
OCSP callback.
"""
ctx = Context(SSLv23_METHOD)
ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))
ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))
ctx.set_ocsp_server_callback(callback, data)
server = Connection(ctx)
server.set_accept_state()
return server
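    # Usage sketch (illustrative, not part of the original tests): the two
    # helpers above are intended to be paired and driven entirely in memory
    # with the module's handshake helper, roughly:
    #
    #     client = self._client_connection(callback=client_cb, data=None)
    #     server = self._server_connection(callback=server_cb, data=None)
    #     handshake_in_memory(client, server)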
def test_callbacks_arent_called_by_default(self):
"""
If both the client and the server have registered OCSP callbacks, but
the client does not send the OCSP request, neither callback gets
called.
"""
def ocsp_callback(*args, **kwargs): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(
callback=ocsp_callback, data=None, request_ocsp=False
)
server = self._server_connection(callback=ocsp_callback, data=None)
handshake_in_memory(client, server)
def test_client_negotiates_without_server(self):
"""
If the client wants to do OCSP but the server does not, the handshake
succeeds, and the client callback fires with an empty byte string.
"""
called = []
def ocsp_callback(conn, ocsp_data, ignored):
called.append(ocsp_data)
return True
client = self._client_connection(callback=ocsp_callback, data=None)
server = loopback_server_factory(socket=None)
handshake_in_memory(client, server)
assert len(called) == 1
assert called[0] == b''
def test_client_receives_servers_data(self):
"""
The data the server sends in its callback is received by the client.
"""
calls = []
def server_callback(*args, **kwargs):
return self.sample_ocsp_data
def client_callback(conn, ocsp_data, ignored):
calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(calls) == 1
assert calls[0] == self.sample_ocsp_data
def test_callbacks_are_invoked_with_connections(self):
"""
The first arguments to both callbacks are their respective connections.
"""
client_calls = []
server_calls = []
def client_callback(conn, *args, **kwargs):
client_calls.append(conn)
return True
def server_callback(conn, *args, **kwargs):
server_calls.append(conn)
return self.sample_ocsp_data
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert len(server_calls) == 1
assert client_calls[0] is client
assert server_calls[0] is server
def test_opaque_data_is_passed_through(self):
"""
        Both callbacks receive the opaque, user-provided data object as their
        final argument.
"""
calls = []
def server_callback(*args):
calls.append(args)
return self.sample_ocsp_data
def client_callback(*args):
calls.append(args)
return True
sentinel = object()
client = self._client_connection(
callback=client_callback, data=sentinel
)
server = self._server_connection(
callback=server_callback, data=sentinel
)
handshake_in_memory(client, server)
assert len(calls) == 2
assert calls[0][-1] is sentinel
assert calls[1][-1] is sentinel
def test_server_returns_empty_string(self):
"""
If the server returns an empty bytestring from its callback, the
client callback is called with the empty bytestring.
"""
client_calls = []
def server_callback(*args):
return b''
def client_callback(conn, ocsp_data, ignored):
client_calls.append(ocsp_data)
return True
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
handshake_in_memory(client, server)
assert len(client_calls) == 1
assert client_calls[0] == b''
def test_client_returns_false_terminates_handshake(self):
"""
If the client returns False from its callback, the handshake fails.
"""
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
return False
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(Error):
handshake_in_memory(client, server)
def test_exceptions_in_client_bubble_up(self):
"""
        Exceptions raised in the client callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
return self.sample_ocsp_data
def client_callback(*args):
raise SentinelException()
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_exceptions_in_server_bubble_up(self):
"""
        Exceptions raised in the server callback bubble up to the caller.
"""
class SentinelException(Exception):
pass
def server_callback(*args):
raise SentinelException()
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(SentinelException):
handshake_in_memory(client, server)
def test_server_must_return_bytes(self):
"""
The server callback must return a bytestring, or a TypeError is thrown.
"""
def server_callback(*args):
return self.sample_ocsp_data.decode('ascii')
def client_callback(*args): # pragma: nocover
pytest.fail("Should not be called")
client = self._client_connection(callback=client_callback, data=None)
server = self._server_connection(callback=server_callback, data=None)
with pytest.raises(TypeError):
handshake_in_memory(client, server)
|
_GenerateCategories
|
Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy admx/adml template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
from __future__ import print_function
import codecs
import filecmp
import os
import re
import sys
MAIN_POLICY_KEY = r'Software\Policies\HuhiSoftware\Update'
ADMX_HEADER = '<policyDefinitions revision="1.0" schemaVersion="1.0">'
ADMX_ENVIRONMENT = '''
<policyNamespaces>
<target namespace="Google.Policies.Update" prefix="update"/>
<using namespace="Google.Policies" prefix="Google"/>
<using prefix="windows" namespace="Microsoft.Policies.Windows" />
</policyNamespaces>
<supersededAdm fileName="GoogleUpdate.adm" />
<resources minRequiredRevision="1.0" />
<supportedOn>
<definitions>
<definition name="Sup_GoogleUpdate1_2_145_5"
displayName="$(string.Sup_GoogleUpdate1_2_145_5)" />
<definition name="Sup_GoogleUpdate1_3_21_81"
displayName="$(string.Sup_GoogleUpdate1_3_21_81)" />
<definition name="Sup_GoogleUpdate1_3_26_0"
displayName="$(string.Sup_GoogleUpdate1_3_26_0)" />
<definition name="Sup_GoogleUpdate1_3_33_5"
displayName="$(string.Sup_GoogleUpdate1_3_33_5)" />
<definition name="Sup_GoogleUpdate1_3_34_3"
displayName="$(string.Sup_GoogleUpdate1_3_34_3)" />
</definitions>
</supportedOn>
'''
ADMX_CATEGORIES = r'''
<categories>
<category name="Cat_GoogleUpdate" displayName="$(string.Cat_GoogleUpdate)"
explainText="$(string.Explain_GoogleUpdate)">
<parentCategory ref="Google:Cat_Google" />
</category>
<category name="Cat_Preferences" displayName="$(string.Cat_Preferences)"
explainText="$(string.Explain_Preferences)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_ProxyServer" displayName="$(string.Cat_ProxyServer)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_Applications" displayName="$(string.Cat_Applications)"
explainText="$(string.Explain_Applications)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
%(AppCategorList)s
</categories>
'''
ADMX_POLICIES = r'''
<policies>
<policy name="Pol_AutoUpdateCheckPeriod" class="Machine"
displayName="$(string.Pol_AutoUpdateCheckPeriod)"
explainText="$(string.Explain_AutoUpdateCheckPeriod)"
presentation="$(presentation.Pol_AutoUpdateCheckPeriod)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<decimal id="Part_AutoUpdateCheckPeriod"
key="%(RootPolicyKey)s"
valueName="AutoUpdateCheckPeriodMinutes"
required="true" minValue="0" maxValue="43200" />
</elements>
</policy>
<policy name="Pol_DownloadPreference" class="Machine"
displayName="$(string.Pol_DownloadPreference)"
explainText="$(string.Explain_DownloadPreference)"
presentation="$(presentation.Pol_DownloadPreference)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_26_0" />
<elements>
<enum id="Part_DownloadPreference" key="%(RootPolicyKey)s"
valueName="DownloadPreference">
<item displayName="$(string.DownloadPreference_DropDown)">
<value>
<string>cacheable</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdateCheckSuppressedPeriod" class="Machine"
displayName="$(string.Pol_UpdateCheckSuppressedPeriod)"
explainText="$(string.Explain_UpdateCheckSuppressedPeriod)"
presentation="$(presentation.Pol_UpdateCheckSuppressedPeriod)"
key="Software\Policies\Google\Update">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<decimal id="Part_UpdateCheckSuppressedStartHour"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartHour"
required="true" minValue="0" maxValue="23" />
<decimal id="Part_UpdateCheckSuppressedStartMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartMin"
required="true" minValue="0" maxValue="59" />
<decimal id="Part_UpdateCheckSuppressedDurationMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedDurationMin"
required="true" minValue="1" maxValue="960" />
</elements>
</policy>
<policy name="Pol_ProxyMode" class="Machine"
displayName="$(string.Pol_ProxyMode)"
explainText="$(string.Explain_ProxyMode)"
presentation="$(presentation.Pol_ProxyMode)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<enum id="Part_ProxyMode" key="%(RootPolicyKey)s"
valueName="ProxyMode">
<item displayName="$(string.ProxyDisabled_DropDown)">
<value>
<string>direct</string>
</value>
</item>
<item displayName="$(string.ProxyAutoDetect_DropDown)">
<value>
<string>auto_detect</string>
</value>
</item>
<item displayName="$(string.ProxyPacScript_DropDown)">
<value>
<string>pac_script</string>
</value>
</item>
<item displayName="$(string.ProxyFixedServers_DropDown)">
<value>
<string>fixed_servers</string>
</value>
</item>
<item displayName="$(string.ProxyUseSystem_DropDown)">
<value>
<string>system</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_ProxyServer" class="Machine"
displayName="$(string.Pol_ProxyServer)"
explainText="$(string.Explain_ProxyServer)"
presentation="$(presentation.Pol_ProxyServer)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyServer" valueName="ProxyServer" />
</elements>
</policy>
<policy name="Pol_ProxyPacUrl" class="Machine"
displayName="$(string.Pol_ProxyPacUrl)"
explainText="$(string.Explain_ProxyPacUrl)"
presentation="$(presentation.Pol_ProxyPacUrl)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyPacUrl" valueName="ProxyPacUrl" />
</elements>
</policy>
<policy name="Pol_DefaultAllowInstallation" class="Machine"
displayName="$(string.Pol_DefaultAllowInstallation)"
explainText="$(string.Explain_DefaultAllowInstallation)"
presentation="$(presentation.Pol_DefaultAllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy" key="%(RootPolicyKey)s"
valueName="InstallDefault" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_DefaultUpdatePolicy" class="Machine"
displayName="$(string.Pol_DefaultUpdatePolicy)"
explainText="$(string.Explain_DefaultUpdatePolicy)"
presentation="$(presentation.Pol_DefaultUpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy" key="%(RootPolicyKey)s"
valueName="UpdateDefault" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
%(AppPolicyList)s
</policies>
'''
ADMX_APP_POLICY_TEMPLATE = '''\
<policy name="Pol_AllowInstallation%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_AllowInstallation)"
explainText="$(string.Explain_Install%(AppLegalId)s)"
presentation="$(presentation.Pol_AllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy"
valueName="Install%(AppGuid)s" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdatePolicy%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_UpdatePolicy)"
explainText="$(string.Explain_AutoUpdate%(AppLegalId)s)"
presentation="$(presentation.Pol_UpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy"
valueName="Update%(AppGuid)s" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_TargetVersionPrefix%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_TargetVersionPrefix)"
explainText="$(string.Explain_TargetVersionPrefix%(AppLegalId)s)"
presentation="$(presentation.Pol_TargetVersionPrefix)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<text id="Part_TargetVersionPrefix"
valueName="TargetVersionPrefix%(AppGuid)s" />
</elements>
</policy>
<policy name="Pol_RollbackToTargetVersion%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_RollbackToTargetVersion)"
explainText="$(string.Explain_RollbackToTargetVersion%(AppLegalId)s)"
presentation="$(presentation.Pol_RollbackToTargetVersion)"
key="%(RootPolicyKey)s"
valueName="RollbackToTargetVersion%(AppGuid)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_34_3" />
<enabledValue><decimal value="1" /></enabledValue>
<disabledValue><decimal value="0" /></disabledValue>
</policy>'''
ADMX_FOOTER = '</policyDefinitions>'
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADMX/ADML files.
Changes some characters that do not necessarily cause problems and may not
handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
return re.sub(r'[\W_]', '', input_string)
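# Illustrative example (not part of the original source): spaces, punctuation,
# underscores and symbols are all stripped, so an app name such as
# u'Google User Test Foo\u00a9\u00ae\u2122' becomes 'GoogleUserTestFoo'.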
def GenerateGroupPolicyTemplateAdmx(apps):
"""Generates a Group Policy template (ADMX format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADMX file.
"""
# MASKED: _GenerateCategories function (lines 385-409)
def _GeneratePolicies(apps):
"""Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
"""
app_policy_list = []
for app in apps:
app_name, app_guid, _, _ = app
app_policy_list.append(ADMX_APP_POLICY_TEMPLATE % {
'AppLegalId': _CreateLegalIdentifier(app_name),
'AppGuid': app_guid,
'RootPolicyKey': MAIN_POLICY_KEY,
})
return ADMX_POLICIES % {
'AppPolicyList': '\n'.join(app_policy_list),
'RootPolicyKey': MAIN_POLICY_KEY,
}
target_contents = [
ADMX_HEADER,
ADMX_ENVIRONMENT,
_GenerateCategories(apps),
_GeneratePolicies(apps),
ADMX_FOOTER,
]
return ''.join(target_contents)
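# Usage sketch (illustrative only; the app name and GUID below are made up):
#
#   admx_text = GenerateGroupPolicyTemplateAdmx(
#       [('Example App', '{00000000-0000-0000-0000-000000000000}', '', '')])
#   assert admx_text.startswith(ADMX_HEADER)
#   assert admx_text.endswith(ADMX_FOOTER)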
ADML_HEADER = '''\
<policyDefinitionResources revision="1.0" schemaVersion="1.0">
'''
ADML_ENVIRONMENT = '''\
<displayName>
</displayName>
<description>
</description>
'''
ADML_DEFAULT_ROLLBACK_DISCLAIMER = (
'This policy is meant to serve as temporary measure when Enterprise '
'Administrators need to downgrade for business reasons. To ensure '
'users are protected by the latest security updates, the most recent '
'version should be used. When versions are downgraded to older '
'versions, there could be incompatibilities.')
ADML_DOMAIN_REQUIREMENT_EN = (
'This policy is available only on Windows instances that are joined to a '
'Microsoft® Active Directory® domain.')
ADML_PREDEFINED_STRINGS_TABLE_EN = [
('Sup_GoogleUpdate1_2_145_5', 'At least Google Update 1.2.145.5'),
('Sup_GoogleUpdate1_3_21_81', 'At least Google Update 1.3.21.81'),
('Sup_GoogleUpdate1_3_26_0', 'At least Google Update 1.3.26.0'),
('Sup_GoogleUpdate1_3_33_5', 'At least Google Update 1.3.33.5'),
('Sup_GoogleUpdate1_3_34_3', 'At least Google Update 1.3.34.3'),
('Cat_GoogleUpdate', 'Google Update'),
('Cat_Preferences', 'Preferences'),
('Cat_ProxyServer', 'Proxy Server'),
('Cat_Applications', 'Applications'),
('Pol_AutoUpdateCheckPeriod', 'Auto-update check period override'),
('Pol_UpdateCheckSuppressedPeriod',
'Time period in each day to suppress auto-update check'),
('Pol_DownloadPreference', 'Download URL class override'),
('DownloadPreference_DropDown', 'Cacheable download URLs'),
('Pol_ProxyMode', 'Choose how to specify proxy server settings'),
('Pol_ProxyServer', 'Address or URL of proxy server'),
('Pol_ProxyPacUrl', 'URL to a proxy .pac file'),
('Pol_DefaultAllowInstallation', 'Allow installation default'),
('Pol_AllowInstallation', 'Allow installation'),
('Pol_DefaultUpdatePolicy', 'Update policy override default'),
('Pol_UpdatePolicy', 'Update policy override'),
('Pol_TargetVersionPrefix', 'Target version prefix override'),
('Pol_RollbackToTargetVersion', 'Rollback to Target version'),
('Part_AutoUpdateCheckPeriod', 'Minutes between update checks'),
('Part_UpdateCheckSuppressedStartHour',
'Hour in a day that start to suppress update check'),
('Part_UpdateCheckSuppressedStartMin',
'Minute in hour that starts to suppress update check'),
('Part_UpdateCheckSuppressedDurationMin',
'Number of minutes to suppress update check each day'),
('Part_ProxyMode', 'Choose how to specify proxy server settings'),
('Part_ProxyServer', 'Address or URL of proxy server'),
('Part_ProxyPacUrl', 'URL to a proxy .pac file'),
('Part_InstallPolicy', 'Policy'),
('Name_InstallsEnabled', 'Always allow Installs (recommended)'),
('Name_InstallsEnabledMachineOnly',
'Always allow Machine-Wide Installs, but not Per-User Installs.'),
('Name_InstallsDisabled', 'Installs disabled'),
('Part_UpdatePolicy', 'Policy'),
('Part_TargetVersionPrefix', 'Target version prefix'),
('Name_UpdatesEnabled', 'Always allow updates (recommended)'),
('Name_ManualUpdatesOnly', 'Manual updates only'),
('Name_AutomaticUpdatesOnly', 'Automatic silent updates only'),
('Name_UpdatesDisabled', 'Updates disabled'),
('ProxyDisabled_DropDown', 'Never use a proxy'),
('ProxyAutoDetect_DropDown', 'Auto detect proxy settings'),
('ProxyPacScript_DropDown', 'Use a .pac proxy script'),
('ProxyFixedServers_DropDown', 'Use fixed proxy servers'),
('ProxyUseSystem_DropDown', 'Use system proxy settings'),
('Explain_GoogleUpdate',
'Policies to control the installation and updating of Google applications '
'that use Google Update/Google Installer.'),
('Explain_Preferences', 'General policies for Google Update.'),
('Explain_AutoUpdateCheckPeriod',
'Minimum number of minutes between automatic update checks.\n\n'
'Set the value to 0 if you want to disable all auto-update checks '
'(not recommended).\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DownloadPreference',
'If enabled, the Google Update server will attempt to provide '
'cache-friendly URLs for update payloads in its responses.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_UpdateCheckSuppressedPeriod',
'If this setting is enabled, update checks will be suppressed during '
'each day starting from Hour:Minute for a period of Duration (in minutes).'
' Duration does not account for daylight savings time. So for instance, '
'if the start time is 22:00, and with a duration of 480 minutes, the '
'updates will be suppressed for 8 hours regardless of whether daylight '
'savings time changes happen in between.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyMode',
'Allows you to specify the proxy server used by Google Update.\n\n'
'If you choose to never use a proxy server and always connect directly, '
'all other options are ignored.\n\n'
'If you choose to use system proxy settings or auto detect the proxy '
'server, all other options are ignored.\n\n'
'If you choose fixed server proxy mode, you can specify further options '
'in \'Address or URL of proxy server\'.\n\n'
'If you choose to use a .pac proxy script, you must specify the URL to '
'the script in \'URL to a proxy .pac file\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyServer',
'You can specify the URL of the proxy server here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyPacUrl',
'You can specify a URL to a proxy .pac file here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_Applications', 'Policies for individual applications.\n\n'
'An updated ADMX/ADML template will be required to support '
'Google applications released in the future.'),
('Explain_DefaultAllowInstallation',
'Specifies the default behavior for whether Google software can be '
'installed using Google Update/Google Installer.\n\n'
'Can be overridden by the "Allow installation" for individual '
'applications.\n\n'
'Only affects installation of Google software using Google Update/Google '
'Installer. Cannot prevent running the application installer directly or '
'installation of Google software that does not use Google Update/Google '
'Installer for installation.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DefaultUpdatePolicy',
'Specifies the default policy for software updates from Google.\n\n'
'Can be overridden by the "Update policy override" for individual '
'applications.\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, either '
'by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user does a '
'manual update check. (Not all apps provide an interface for this.)\n'
' - Automatic silent updates only: Updates are only applied when they are '
'found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for updates '
'using each application\'s manual update mechanism if available. If you '
'disable updates, you should periodically check for updates and '
'distribute them to users.\n\n'
'Only affects updates for Google software that uses Google Update for '
'updates. Does not prevent auto-updates of Google software that does not '
'use Google Update for updates.\n\n'
'Updates for Google Update are not affected by this setting; Google '
'Update will continue to update itself while it is installed.\n\n'
   'WARNING: Disabling updates will also prevent updates of any new Google '
'applications released in the future, possibly including dependencies for '
'future versions of installed applications.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
]
ADML_PRESENTATIONS = '''\
<presentation id="Pol_AutoUpdateCheckPeriod">
<decimalTextBox refId="Part_AutoUpdateCheckPeriod" defaultValue="1400"
spinStep="60">Minutes between update checks</decimalTextBox>
</presentation>
<presentation id="Pol_UpdateCheckSuppressedPeriod">
<decimalTextBox refId="Part_UpdateCheckSuppressedStartHour"
defaultValue="0" spinStep="1">Hour</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedStartMin"
defaultValue="0" spinStep="1">Minute</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedDurationMin"
defaultValue="60">Duration</decimalTextBox>
</presentation>
<presentation id="Pol_DownloadPreference">
<dropdownList refId="Part_DownloadPreference"
defaultItem="0">Type of download URL to request</dropdownList>
</presentation>
<presentation id="Pol_ProxyMode">
<dropdownList refId="Part_ProxyMode"
defaultItem="0">Choose how to specify proxy server settings
</dropdownList>
</presentation>
<presentation id="Pol_ProxyServer">
<textBox refId="Part_ProxyServer">
<label>Address or URL of proxy server</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_ProxyPacUrl">
<textBox refId="Part_ProxyPacUrl">
<label>URL to a proxy .pac file</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_DefaultAllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_DefaultUpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_AllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_UpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_TargetVersionPrefix">
<textBox refId="Part_TargetVersionPrefix">
<label>Target version prefix</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_RollbackToTargetVersion" />
'''
ADML_RESOURCE_TABLE_TEMPLATE = '''
<resources>
<stringTable>
%s
</stringTable>
<presentationTable>
%s
</presentationTable>
</resources>
'''
ADML_FOOTER = '</policyDefinitionResources>'
def GenerateGroupPolicyTemplateAdml(apps):
"""Generates a Group Policy template (ADML format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADML file.
"""
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if not rollback_disclaimer:
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = ('Cat_' + app_legal_id, app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (
'Explain_Install' + app_legal_id,
'Specifies whether %s can be installed using Google Update/Google '
'Installer.\n\n'
'If this policy is not configured, %s can be installed as specified '
'by "Allow installation default".\n\n'
'%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (
'Explain_AutoUpdate' + app_legal_id,
'Specifies how Google Update handles available %s updates '
'from Google.\n\n'
'If this policy is not configured, Google Update handles available '
'updates as specified by "Update policy override default".\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, '
'either by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user '
'does a manual update check. (Not all apps provide an interface '
' for this.)\n'
' - Automatic silent updates only: Updates are only applied when '
'they are found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for '
'updates using the application\'s manual update mechanism if '
'available. If you disable updates, you should periodically check '
'for updates and distribute them to users.%s\n\n'
'%s' %
(app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (
'Explain_TargetVersionPrefix' + app_legal_id,
'Specifies which version %s should be updated to.\n\n'
'When this policy is enabled, the app will be updated to the version '
'prefixed with this policy value.\n\nSome examples:\n'
'1) Not configured: app will be updated to the latest version '
'available.\n'
'2) Policy value is set to "55.": the app will be updated to any minor '
'version of 55 (e.g., 55.24.34 or 55.60.2).\n'
'3) Policy value is "55.2.": the app will be updated to any minor '
'version of 55.2 (e.g., 55.2.34 or 55.2.2).\n'
'4) Policy value is "55.24.34": the app will be updated to this '
'specific version only.\n\n'
'%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (
'Explain_RollbackToTargetVersion' + app_legal_id,
'Specifies that Google Update should roll installations of %s back to '
'the version indicated by "Target version prefix override".\n\n'
'This policy setting has no effect unless "Target version prefix '
'override" is set.\n\n'
'If this policy is not configured or is disabled, installs that have a '
'version higher than that specified by "Target version prefix '
'override" will be left as-is.\n\n'
'If this policy is enabled, installs that have a version higher than '
'that specified by "Target version prefix override" will be downgraded '
'to the highest available version that matches the target version.\n\n'
'%s\n\n'
'%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append(' <string id="%s">%s</string>' %
(entry[0], entry[1]))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE %
('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [
ADML_HEADER,
ADML_ENVIRONMENT,
app_resource_tables,
ADML_FOOTER,
]
return ''.join(target_contents)
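# Usage sketch (illustrative only): the ADML side works the same way, e.g.
# GenerateGroupPolicyTemplateAdml(TEST_APPS), where TEST_APPS is the sample
# app list defined in the __main__ self-test below; the result starts with
# ADML_HEADER and ends with ADML_FOOTER.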
def WriteGroupPolicyTemplateAdmx(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADMX template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
def WriteGroupPolicyTemplateAdml(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADML template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.',
'Disclaimer'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.',
''),
]
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, 'test_gold.admx')
output_path = os.path.join(module_dir, 'test_out.admx')
WriteGroupPolicyTemplateAdmx(output_path, TEST_APPS)
admx_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not admx_files_equal:
print('FAIL: ADMX files are not equal.')
gold_path = os.path.join(module_dir, 'test_gold.adml')
output_path = os.path.join(module_dir, 'test_out.adml')
WriteGroupPolicyTemplateAdml(output_path, TEST_APPS)
adml_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not adml_files_equal:
print('FAIL: ADML files are not equal.')
if admx_files_equal and adml_files_equal:
print('SUCCESS. contents are equal')
else:
sys.exit(-1)
|
def _GenerateCategories(apps):
"""Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
"""
admx_app_category_template = (
' <category name="Cat_%(AppLegalId)s"\n'
' displayName="$(string.Cat_%(AppLegalId)s)">\n'
' <parentCategory ref="Cat_Applications" />\n'
' </category>')
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append(admx_app_category_template % {
'AppLegalId': _CreateLegalIdentifier(app_name)
})
return ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}
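  # For illustration (not part of the original function): an app named
  # 'Google Test Foo' yields a category entry roughly like
  #     <category name="Cat_GoogleTestFoo"
  #         displayName="$(string.Cat_GoogleTestFoo)">
  #       <parentCategory ref="Cat_Applications" />
  #     </category>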
| 385 | 409 |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy admx/adml template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
from __future__ import print_function
import codecs
import filecmp
import os
import re
import sys
MAIN_POLICY_KEY = r'Software\Policies\HuhiSoftware\Update'
ADMX_HEADER = '<policyDefinitions revision="1.0" schemaVersion="1.0">'
ADMX_ENVIRONMENT = '''
<policyNamespaces>
<target namespace="Google.Policies.Update" prefix="update"/>
<using namespace="Google.Policies" prefix="Google"/>
<using prefix="windows" namespace="Microsoft.Policies.Windows" />
</policyNamespaces>
<supersededAdm fileName="GoogleUpdate.adm" />
<resources minRequiredRevision="1.0" />
<supportedOn>
<definitions>
<definition name="Sup_GoogleUpdate1_2_145_5"
displayName="$(string.Sup_GoogleUpdate1_2_145_5)" />
<definition name="Sup_GoogleUpdate1_3_21_81"
displayName="$(string.Sup_GoogleUpdate1_3_21_81)" />
<definition name="Sup_GoogleUpdate1_3_26_0"
displayName="$(string.Sup_GoogleUpdate1_3_26_0)" />
<definition name="Sup_GoogleUpdate1_3_33_5"
displayName="$(string.Sup_GoogleUpdate1_3_33_5)" />
<definition name="Sup_GoogleUpdate1_3_34_3"
displayName="$(string.Sup_GoogleUpdate1_3_34_3)" />
</definitions>
</supportedOn>
'''
ADMX_CATEGORIES = r'''
<categories>
<category name="Cat_GoogleUpdate" displayName="$(string.Cat_GoogleUpdate)"
explainText="$(string.Explain_GoogleUpdate)">
<parentCategory ref="Google:Cat_Google" />
</category>
<category name="Cat_Preferences" displayName="$(string.Cat_Preferences)"
explainText="$(string.Explain_Preferences)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_ProxyServer" displayName="$(string.Cat_ProxyServer)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_Applications" displayName="$(string.Cat_Applications)"
explainText="$(string.Explain_Applications)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
%(AppCategorList)s
</categories>
'''
ADMX_POLICIES = r'''
<policies>
<policy name="Pol_AutoUpdateCheckPeriod" class="Machine"
displayName="$(string.Pol_AutoUpdateCheckPeriod)"
explainText="$(string.Explain_AutoUpdateCheckPeriod)"
presentation="$(presentation.Pol_AutoUpdateCheckPeriod)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<decimal id="Part_AutoUpdateCheckPeriod"
key="%(RootPolicyKey)s"
valueName="AutoUpdateCheckPeriodMinutes"
required="true" minValue="0" maxValue="43200" />
</elements>
</policy>
<policy name="Pol_DownloadPreference" class="Machine"
displayName="$(string.Pol_DownloadPreference)"
explainText="$(string.Explain_DownloadPreference)"
presentation="$(presentation.Pol_DownloadPreference)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_26_0" />
<elements>
<enum id="Part_DownloadPreference" key="%(RootPolicyKey)s"
valueName="DownloadPreference">
<item displayName="$(string.DownloadPreference_DropDown)">
<value>
<string>cacheable</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdateCheckSuppressedPeriod" class="Machine"
displayName="$(string.Pol_UpdateCheckSuppressedPeriod)"
explainText="$(string.Explain_UpdateCheckSuppressedPeriod)"
presentation="$(presentation.Pol_UpdateCheckSuppressedPeriod)"
key="Software\Policies\Google\Update">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<decimal id="Part_UpdateCheckSuppressedStartHour"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartHour"
required="true" minValue="0" maxValue="23" />
<decimal id="Part_UpdateCheckSuppressedStartMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartMin"
required="true" minValue="0" maxValue="59" />
<decimal id="Part_UpdateCheckSuppressedDurationMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedDurationMin"
required="true" minValue="1" maxValue="960" />
</elements>
</policy>
<policy name="Pol_ProxyMode" class="Machine"
displayName="$(string.Pol_ProxyMode)"
explainText="$(string.Explain_ProxyMode)"
presentation="$(presentation.Pol_ProxyMode)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<enum id="Part_ProxyMode" key="%(RootPolicyKey)s"
valueName="ProxyMode">
<item displayName="$(string.ProxyDisabled_DropDown)">
<value>
<string>direct</string>
</value>
</item>
<item displayName="$(string.ProxyAutoDetect_DropDown)">
<value>
<string>auto_detect</string>
</value>
</item>
<item displayName="$(string.ProxyPacScript_DropDown)">
<value>
<string>pac_script</string>
</value>
</item>
<item displayName="$(string.ProxyFixedServers_DropDown)">
<value>
<string>fixed_servers</string>
</value>
</item>
<item displayName="$(string.ProxyUseSystem_DropDown)">
<value>
<string>system</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_ProxyServer" class="Machine"
displayName="$(string.Pol_ProxyServer)"
explainText="$(string.Explain_ProxyServer)"
presentation="$(presentation.Pol_ProxyServer)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyServer" valueName="ProxyServer" />
</elements>
</policy>
<policy name="Pol_ProxyPacUrl" class="Machine"
displayName="$(string.Pol_ProxyPacUrl)"
explainText="$(string.Explain_ProxyPacUrl)"
presentation="$(presentation.Pol_ProxyPacUrl)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyPacUrl" valueName="ProxyPacUrl" />
</elements>
</policy>
<policy name="Pol_DefaultAllowInstallation" class="Machine"
displayName="$(string.Pol_DefaultAllowInstallation)"
explainText="$(string.Explain_DefaultAllowInstallation)"
presentation="$(presentation.Pol_DefaultAllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy" key="%(RootPolicyKey)s"
valueName="InstallDefault" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_DefaultUpdatePolicy" class="Machine"
displayName="$(string.Pol_DefaultUpdatePolicy)"
explainText="$(string.Explain_DefaultUpdatePolicy)"
presentation="$(presentation.Pol_DefaultUpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy" key="%(RootPolicyKey)s"
valueName="UpdateDefault" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
%(AppPolicyList)s
</policies>
'''
ADMX_APP_POLICY_TEMPLATE = '''\
<policy name="Pol_AllowInstallation%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_AllowInstallation)"
explainText="$(string.Explain_Install%(AppLegalId)s)"
presentation="$(presentation.Pol_AllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy"
valueName="Install%(AppGuid)s" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdatePolicy%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_UpdatePolicy)"
explainText="$(string.Explain_AutoUpdate%(AppLegalId)s)"
presentation="$(presentation.Pol_UpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy"
valueName="Update%(AppGuid)s" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_TargetVersionPrefix%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_TargetVersionPrefix)"
explainText="$(string.Explain_TargetVersionPrefix%(AppLegalId)s)"
presentation="$(presentation.Pol_TargetVersionPrefix)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<text id="Part_TargetVersionPrefix"
valueName="TargetVersionPrefix%(AppGuid)s" />
</elements>
</policy>
<policy name="Pol_RollbackToTargetVersion%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_RollbackToTargetVersion)"
explainText="$(string.Explain_RollbackToTargetVersion%(AppLegalId)s)"
presentation="$(presentation.Pol_RollbackToTargetVersion)"
key="%(RootPolicyKey)s"
valueName="RollbackToTargetVersion%(AppGuid)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_34_3" />
<enabledValue><decimal value="1" /></enabledValue>
<disabledValue><decimal value="0" /></disabledValue>
</policy>'''
ADMX_FOOTER = '</policyDefinitions>'
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADMX/ADML files.
Changes some characters that do not necessarily cause problems and may not
handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
return re.sub(r'[\W_]', '', input_string)
def GenerateGroupPolicyTemplateAdmx(apps):
"""Generates a Group Policy template (ADMX format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADMX file.
"""
def _GenerateCategories(apps):
"""Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
"""
admx_app_category_template = (
' <category name="Cat_%(AppLegalId)s"\n'
' displayName="$(string.Cat_%(AppLegalId)s)">\n'
' <parentCategory ref="Cat_Applications" />\n'
' </category>')
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append(admx_app_category_template % {
'AppLegalId': _CreateLegalIdentifier(app_name)
})
return ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}
def _GeneratePolicies(apps):
"""Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
"""
app_policy_list = []
for app in apps:
app_name, app_guid, _, _ = app
app_policy_list.append(ADMX_APP_POLICY_TEMPLATE % {
'AppLegalId': _CreateLegalIdentifier(app_name),
'AppGuid': app_guid,
'RootPolicyKey': MAIN_POLICY_KEY,
})
return ADMX_POLICIES % {
'AppPolicyList': '\n'.join(app_policy_list),
'RootPolicyKey': MAIN_POLICY_KEY,
}
target_contents = [
ADMX_HEADER,
ADMX_ENVIRONMENT,
_GenerateCategories(apps),
_GeneratePolicies(apps),
ADMX_FOOTER,
]
return ''.join(target_contents)
ADML_HEADER = '''\
<policyDefinitionResources revision="1.0" schemaVersion="1.0">
'''
ADML_ENVIRONMENT = '''\
<displayName>
</displayName>
<description>
</description>
'''
ADML_DEFAULT_ROLLBACK_DISCLAIMER = (
'This policy is meant to serve as temporary measure when Enterprise '
'Administrators need to downgrade for business reasons. To ensure '
'users are protected by the latest security updates, the most recent '
'version should be used. When versions are downgraded to older '
'versions, there could be incompatibilities.')
ADML_DOMAIN_REQUIREMENT_EN = (
'This policy is available only on Windows instances that are joined to a '
'Microsoft® Active Directory® domain.')
ADML_PREDEFINED_STRINGS_TABLE_EN = [
('Sup_GoogleUpdate1_2_145_5', 'At least Google Update 1.2.145.5'),
('Sup_GoogleUpdate1_3_21_81', 'At least Google Update 1.3.21.81'),
('Sup_GoogleUpdate1_3_26_0', 'At least Google Update 1.3.26.0'),
('Sup_GoogleUpdate1_3_33_5', 'At least Google Update 1.3.33.5'),
('Sup_GoogleUpdate1_3_34_3', 'At least Google Update 1.3.34.3'),
('Cat_GoogleUpdate', 'Google Update'),
('Cat_Preferences', 'Preferences'),
('Cat_ProxyServer', 'Proxy Server'),
('Cat_Applications', 'Applications'),
('Pol_AutoUpdateCheckPeriod', 'Auto-update check period override'),
('Pol_UpdateCheckSuppressedPeriod',
'Time period in each day to suppress auto-update check'),
('Pol_DownloadPreference', 'Download URL class override'),
('DownloadPreference_DropDown', 'Cacheable download URLs'),
('Pol_ProxyMode', 'Choose how to specify proxy server settings'),
('Pol_ProxyServer', 'Address or URL of proxy server'),
('Pol_ProxyPacUrl', 'URL to a proxy .pac file'),
('Pol_DefaultAllowInstallation', 'Allow installation default'),
('Pol_AllowInstallation', 'Allow installation'),
('Pol_DefaultUpdatePolicy', 'Update policy override default'),
('Pol_UpdatePolicy', 'Update policy override'),
('Pol_TargetVersionPrefix', 'Target version prefix override'),
('Pol_RollbackToTargetVersion', 'Rollback to Target version'),
('Part_AutoUpdateCheckPeriod', 'Minutes between update checks'),
('Part_UpdateCheckSuppressedStartHour',
'Hour in a day that start to suppress update check'),
('Part_UpdateCheckSuppressedStartMin',
'Minute in hour that starts to suppress update check'),
('Part_UpdateCheckSuppressedDurationMin',
'Number of minutes to suppress update check each day'),
('Part_ProxyMode', 'Choose how to specify proxy server settings'),
('Part_ProxyServer', 'Address or URL of proxy server'),
('Part_ProxyPacUrl', 'URL to a proxy .pac file'),
('Part_InstallPolicy', 'Policy'),
('Name_InstallsEnabled', 'Always allow Installs (recommended)'),
('Name_InstallsEnabledMachineOnly',
'Always allow Machine-Wide Installs, but not Per-User Installs.'),
('Name_InstallsDisabled', 'Installs disabled'),
('Part_UpdatePolicy', 'Policy'),
('Part_TargetVersionPrefix', 'Target version prefix'),
('Name_UpdatesEnabled', 'Always allow updates (recommended)'),
('Name_ManualUpdatesOnly', 'Manual updates only'),
('Name_AutomaticUpdatesOnly', 'Automatic silent updates only'),
('Name_UpdatesDisabled', 'Updates disabled'),
('ProxyDisabled_DropDown', 'Never use a proxy'),
('ProxyAutoDetect_DropDown', 'Auto detect proxy settings'),
('ProxyPacScript_DropDown', 'Use a .pac proxy script'),
('ProxyFixedServers_DropDown', 'Use fixed proxy servers'),
('ProxyUseSystem_DropDown', 'Use system proxy settings'),
('Explain_GoogleUpdate',
'Policies to control the installation and updating of Google applications '
'that use Google Update/Google Installer.'),
('Explain_Preferences', 'General policies for Google Update.'),
('Explain_AutoUpdateCheckPeriod',
'Minimum number of minutes between automatic update checks.\n\n'
'Set the value to 0 if you want to disable all auto-update checks '
'(not recommended).\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DownloadPreference',
'If enabled, the Google Update server will attempt to provide '
'cache-friendly URLs for update payloads in its responses.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_UpdateCheckSuppressedPeriod',
'If this setting is enabled, update checks will be suppressed during '
'each day starting from Hour:Minute for a period of Duration (in minutes).'
' Duration does not account for daylight savings time. So for instance, '
'if the start time is 22:00, and with a duration of 480 minutes, the '
'updates will be suppressed for 8 hours regardless of whether daylight '
'savings time changes happen in between.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyMode',
'Allows you to specify the proxy server used by Google Update.\n\n'
'If you choose to never use a proxy server and always connect directly, '
'all other options are ignored.\n\n'
'If you choose to use system proxy settings or auto detect the proxy '
'server, all other options are ignored.\n\n'
'If you choose fixed server proxy mode, you can specify further options '
'in \'Address or URL of proxy server\'.\n\n'
'If you choose to use a .pac proxy script, you must specify the URL to '
'the script in \'URL to a proxy .pac file\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyServer',
'You can specify the URL of the proxy server here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyPacUrl',
'You can specify a URL to a proxy .pac file here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_Applications', 'Policies for individual applications.\n\n'
'An updated ADMX/ADML template will be required to support '
'Google applications released in the future.'),
('Explain_DefaultAllowInstallation',
'Specifies the default behavior for whether Google software can be '
'installed using Google Update/Google Installer.\n\n'
'Can be overridden by the "Allow installation" for individual '
'applications.\n\n'
'Only affects installation of Google software using Google Update/Google '
'Installer. Cannot prevent running the application installer directly or '
'installation of Google software that does not use Google Update/Google '
'Installer for installation.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DefaultUpdatePolicy',
'Specifies the default policy for software updates from Google.\n\n'
'Can be overridden by the "Update policy override" for individual '
'applications.\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, either '
'by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user does a '
'manual update check. (Not all apps provide an interface for this.)\n'
' - Automatic silent updates only: Updates are only applied when they are '
'found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for updates '
'using each application\'s manual update mechanism if available. If you '
'disable updates, you should periodically check for updates and '
'distribute them to users.\n\n'
'Only affects updates for Google software that uses Google Update for '
'updates. Does not prevent auto-updates of Google software that does not '
'use Google Update for updates.\n\n'
'Updates for Google Update are not affected by this setting; Google '
'Update will continue to update itself while it is installed.\n\n'
   'WARNING: Disabling updates will also prevent updates of any new Google '
'applications released in the future, possibly including dependencies for '
'future versions of installed applications.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
]
ADML_PRESENTATIONS = '''\
<presentation id="Pol_AutoUpdateCheckPeriod">
<decimalTextBox refId="Part_AutoUpdateCheckPeriod" defaultValue="1400"
spinStep="60">Minutes between update checks</decimalTextBox>
</presentation>
<presentation id="Pol_UpdateCheckSuppressedPeriod">
<decimalTextBox refId="Part_UpdateCheckSuppressedStartHour"
defaultValue="0" spinStep="1">Hour</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedStartMin"
defaultValue="0" spinStep="1">Minute</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedDurationMin"
defaultValue="60">Duration</decimalTextBox>
</presentation>
<presentation id="Pol_DownloadPreference">
<dropdownList refId="Part_DownloadPreference"
defaultItem="0">Type of download URL to request</dropdownList>
</presentation>
<presentation id="Pol_ProxyMode">
<dropdownList refId="Part_ProxyMode"
defaultItem="0">Choose how to specify proxy server settings
</dropdownList>
</presentation>
<presentation id="Pol_ProxyServer">
<textBox refId="Part_ProxyServer">
<label>Address or URL of proxy server</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_ProxyPacUrl">
<textBox refId="Part_ProxyPacUrl">
<label>URL to a proxy .pac file</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_DefaultAllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_DefaultUpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_AllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_UpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_TargetVersionPrefix">
<textBox refId="Part_TargetVersionPrefix">
<label>Target version prefix</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_RollbackToTargetVersion" />
'''
ADML_RESOURCE_TABLE_TEMPLATE = '''
<resources>
<stringTable>
%s
</stringTable>
<presentationTable>
%s
</presentationTable>
</resources>
'''
ADML_FOOTER = '</policyDefinitionResources>'
def GenerateGroupPolicyTemplateAdml(apps):
"""Generates a Group Policy template (ADML format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
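        * rollback disclaimer (may be an empty string; a default disclaimer is then used)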
Returns:
String containing the contents of the .ADML file.
"""
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if not rollback_disclaimer:
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = ('Cat_' + app_legal_id, app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (
'Explain_Install' + app_legal_id,
'Specifies whether %s can be installed using Google Update/Google '
'Installer.\n\n'
'If this policy is not configured, %s can be installed as specified '
'by "Allow installation default".\n\n'
'%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (
'Explain_AutoUpdate' + app_legal_id,
'Specifies how Google Update handles available %s updates '
'from Google.\n\n'
'If this policy is not configured, Google Update handles available '
'updates as specified by "Update policy override default".\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, '
'either by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user '
'does a manual update check. (Not all apps provide an interface '
' for this.)\n'
' - Automatic silent updates only: Updates are only applied when '
'they are found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for '
'updates using the application\'s manual update mechanism if '
'available. If you disable updates, you should periodically check '
'for updates and distribute them to users.%s\n\n'
'%s' %
(app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (
'Explain_TargetVersionPrefix' + app_legal_id,
'Specifies which version %s should be updated to.\n\n'
'When this policy is enabled, the app will be updated to the version '
'prefixed with this policy value.\n\nSome examples:\n'
'1) Not configured: app will be updated to the latest version '
'available.\n'
'2) Policy value is set to "55.": the app will be updated to any minor '
'version of 55 (e.g., 55.24.34 or 55.60.2).\n'
'3) Policy value is "55.2.": the app will be updated to any minor '
'version of 55.2 (e.g., 55.2.34 or 55.2.2).\n'
'4) Policy value is "55.24.34": the app will be updated to this '
'specific version only.\n\n'
'%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (
'Explain_RollbackToTargetVersion' + app_legal_id,
'Specifies that Google Update should roll installations of %s back to '
'the version indicated by "Target version prefix override".\n\n'
'This policy setting has no effect unless "Target version prefix '
'override" is set.\n\n'
'If this policy is not configured or is disabled, installs that have a '
'version higher than that specified by "Target version prefix '
'override" will be left as-is.\n\n'
'If this policy is enabled, installs that have a version higher than '
'that specified by "Target version prefix override" will be downgraded '
'to the highest available version that matches the target version.\n\n'
'%s\n\n'
'%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append(' <string id="%s">%s</string>' %
(entry[0], entry[1]))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE %
('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [
ADML_HEADER,
ADML_ENVIRONMENT,
app_resource_tables,
ADML_FOOTER,
]
return ''.join(target_contents)
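# Illustrative usage (values mirror the TEST_APPS self-test at the bottom of
# this file); each app entry is a 4-tuple of (name, GUID, extra auto-update
# help text, rollback disclaimer):
#   apps = [('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
#            ' Check http://www.google.com/test_foo/.', 'Disclaimer')]
#   adml_text = GenerateGroupPolicyTemplateAdml(apps)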
def WriteGroupPolicyTemplateAdmx(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADMX template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
def WriteGroupPolicyTemplateAdml(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADML template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
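# Illustrative usage for writing both templates (the output paths here are
# hypothetical; the self-test below writes test_out.admx/test_out.adml next to
# this module):
#   WriteGroupPolicyTemplateAdmx(r'out\GoogleUpdate.admx', apps)
#   WriteGroupPolicyTemplateAdml(r'out\en-US\GoogleUpdate.adml', apps)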
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.',
'Disclaimer'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.',
''),
]
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, 'test_gold.admx')
output_path = os.path.join(module_dir, 'test_out.admx')
WriteGroupPolicyTemplateAdmx(output_path, TEST_APPS)
admx_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not admx_files_equal:
print('FAIL: ADMX files are not equal.')
gold_path = os.path.join(module_dir, 'test_gold.adml')
output_path = os.path.join(module_dir, 'test_out.adml')
WriteGroupPolicyTemplateAdml(output_path, TEST_APPS)
adml_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not adml_files_equal:
print('FAIL: ADML files are not equal.')
if admx_files_equal and adml_files_equal:
print('SUCCESS. contents are equal')
else:
sys.exit(-1)
|
_GeneratePolicies
|
Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy admx/adml template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
from __future__ import print_function
import codecs
import filecmp
import os
import re
import sys
MAIN_POLICY_KEY = r'Software\Policies\HuhiSoftware\Update'
ADMX_HEADER = '<policyDefinitions revision="1.0" schemaVersion="1.0">'
ADMX_ENVIRONMENT = '''
<policyNamespaces>
<target namespace="Google.Policies.Update" prefix="update"/>
<using namespace="Google.Policies" prefix="Google"/>
<using prefix="windows" namespace="Microsoft.Policies.Windows" />
</policyNamespaces>
<supersededAdm fileName="GoogleUpdate.adm" />
<resources minRequiredRevision="1.0" />
<supportedOn>
<definitions>
<definition name="Sup_GoogleUpdate1_2_145_5"
displayName="$(string.Sup_GoogleUpdate1_2_145_5)" />
<definition name="Sup_GoogleUpdate1_3_21_81"
displayName="$(string.Sup_GoogleUpdate1_3_21_81)" />
<definition name="Sup_GoogleUpdate1_3_26_0"
displayName="$(string.Sup_GoogleUpdate1_3_26_0)" />
<definition name="Sup_GoogleUpdate1_3_33_5"
displayName="$(string.Sup_GoogleUpdate1_3_33_5)" />
<definition name="Sup_GoogleUpdate1_3_34_3"
displayName="$(string.Sup_GoogleUpdate1_3_34_3)" />
</definitions>
</supportedOn>
'''
ADMX_CATEGORIES = r'''
<categories>
<category name="Cat_GoogleUpdate" displayName="$(string.Cat_GoogleUpdate)"
explainText="$(string.Explain_GoogleUpdate)">
<parentCategory ref="Google:Cat_Google" />
</category>
<category name="Cat_Preferences" displayName="$(string.Cat_Preferences)"
explainText="$(string.Explain_Preferences)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_ProxyServer" displayName="$(string.Cat_ProxyServer)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_Applications" displayName="$(string.Cat_Applications)"
explainText="$(string.Explain_Applications)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
%(AppCategorList)s
</categories>
'''
ADMX_POLICIES = r'''
<policies>
<policy name="Pol_AutoUpdateCheckPeriod" class="Machine"
displayName="$(string.Pol_AutoUpdateCheckPeriod)"
explainText="$(string.Explain_AutoUpdateCheckPeriod)"
presentation="$(presentation.Pol_AutoUpdateCheckPeriod)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<decimal id="Part_AutoUpdateCheckPeriod"
key="%(RootPolicyKey)s"
valueName="AutoUpdateCheckPeriodMinutes"
required="true" minValue="0" maxValue="43200" />
</elements>
</policy>
<policy name="Pol_DownloadPreference" class="Machine"
displayName="$(string.Pol_DownloadPreference)"
explainText="$(string.Explain_DownloadPreference)"
presentation="$(presentation.Pol_DownloadPreference)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_26_0" />
<elements>
<enum id="Part_DownloadPreference" key="%(RootPolicyKey)s"
valueName="DownloadPreference">
<item displayName="$(string.DownloadPreference_DropDown)">
<value>
<string>cacheable</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdateCheckSuppressedPeriod" class="Machine"
displayName="$(string.Pol_UpdateCheckSuppressedPeriod)"
explainText="$(string.Explain_UpdateCheckSuppressedPeriod)"
presentation="$(presentation.Pol_UpdateCheckSuppressedPeriod)"
key="Software\Policies\Google\Update">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<decimal id="Part_UpdateCheckSuppressedStartHour"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartHour"
required="true" minValue="0" maxValue="23" />
<decimal id="Part_UpdateCheckSuppressedStartMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartMin"
required="true" minValue="0" maxValue="59" />
<decimal id="Part_UpdateCheckSuppressedDurationMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedDurationMin"
required="true" minValue="1" maxValue="960" />
</elements>
</policy>
<policy name="Pol_ProxyMode" class="Machine"
displayName="$(string.Pol_ProxyMode)"
explainText="$(string.Explain_ProxyMode)"
presentation="$(presentation.Pol_ProxyMode)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<enum id="Part_ProxyMode" key="%(RootPolicyKey)s"
valueName="ProxyMode">
<item displayName="$(string.ProxyDisabled_DropDown)">
<value>
<string>direct</string>
</value>
</item>
<item displayName="$(string.ProxyAutoDetect_DropDown)">
<value>
<string>auto_detect</string>
</value>
</item>
<item displayName="$(string.ProxyPacScript_DropDown)">
<value>
<string>pac_script</string>
</value>
</item>
<item displayName="$(string.ProxyFixedServers_DropDown)">
<value>
<string>fixed_servers</string>
</value>
</item>
<item displayName="$(string.ProxyUseSystem_DropDown)">
<value>
<string>system</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_ProxyServer" class="Machine"
displayName="$(string.Pol_ProxyServer)"
explainText="$(string.Explain_ProxyServer)"
presentation="$(presentation.Pol_ProxyServer)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyServer" valueName="ProxyServer" />
</elements>
</policy>
<policy name="Pol_ProxyPacUrl" class="Machine"
displayName="$(string.Pol_ProxyPacUrl)"
explainText="$(string.Explain_ProxyPacUrl)"
presentation="$(presentation.Pol_ProxyPacUrl)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyPacUrl" valueName="ProxyPacUrl" />
</elements>
</policy>
<policy name="Pol_DefaultAllowInstallation" class="Machine"
displayName="$(string.Pol_DefaultAllowInstallation)"
explainText="$(string.Explain_DefaultAllowInstallation)"
presentation="$(presentation.Pol_DefaultAllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy" key="%(RootPolicyKey)s"
valueName="InstallDefault" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_DefaultUpdatePolicy" class="Machine"
displayName="$(string.Pol_DefaultUpdatePolicy)"
explainText="$(string.Explain_DefaultUpdatePolicy)"
presentation="$(presentation.Pol_DefaultUpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy" key="%(RootPolicyKey)s"
valueName="UpdateDefault" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
%(AppPolicyList)s
</policies>
'''
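# Note: the %(RootPolicyKey)s placeholders above are filled with
# MAIN_POLICY_KEY when the policy list is generated, so the emitted policies
# are keyed under Software\Policies\HuhiSoftware\Update.  The
# Pol_UpdateCheckSuppressedPeriod policy is the exception; it hard-codes
# Software\Policies\Google\Update rather than using the placeholder.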
ADMX_APP_POLICY_TEMPLATE = '''\
<policy name="Pol_AllowInstallation%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_AllowInstallation)"
explainText="$(string.Explain_Install%(AppLegalId)s)"
presentation="$(presentation.Pol_AllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy"
valueName="Install%(AppGuid)s" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdatePolicy%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_UpdatePolicy)"
explainText="$(string.Explain_AutoUpdate%(AppLegalId)s)"
presentation="$(presentation.Pol_UpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy"
valueName="Update%(AppGuid)s" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_TargetVersionPrefix%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_TargetVersionPrefix)"
explainText="$(string.Explain_TargetVersionPrefix%(AppLegalId)s)"
presentation="$(presentation.Pol_TargetVersionPrefix)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<text id="Part_TargetVersionPrefix"
valueName="TargetVersionPrefix%(AppGuid)s" />
</elements>
</policy>
<policy name="Pol_RollbackToTargetVersion%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_RollbackToTargetVersion)"
explainText="$(string.Explain_RollbackToTargetVersion%(AppLegalId)s)"
presentation="$(presentation.Pol_RollbackToTargetVersion)"
key="%(RootPolicyKey)s"
valueName="RollbackToTargetVersion%(AppGuid)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_34_3" />
<enabledValue><decimal value="1" /></enabledValue>
<disabledValue><decimal value="0" /></disabledValue>
</policy>'''
ADMX_FOOTER = '</policyDefinitions>'
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADMX/ADML files.
  Strips characters from input_string, including some that would not
  necessarily cause problems, and may not handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
return re.sub(r'[\W_]', '', input_string)
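# Illustrative example: _CreateLegalIdentifier('Google Test Foo') returns
# 'GoogleTestFoo', since spaces, punctuation and underscores all match [\W_]
# and are removed.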
def GenerateGroupPolicyTemplateAdmx(apps):
"""Generates a Group Policy template (ADMX format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADMX file.
"""
def _GenerateCategories(apps):
"""Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
"""
admx_app_category_template = (
' <category name="Cat_%(AppLegalId)s"\n'
' displayName="$(string.Cat_%(AppLegalId)s)">\n'
' <parentCategory ref="Cat_Applications" />\n'
' </category>')
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append(admx_app_category_template % {
'AppLegalId': _CreateLegalIdentifier(app_name)
})
return ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}
# MASKED: _GeneratePolicies function (lines 411-434)
target_contents = [
ADMX_HEADER,
ADMX_ENVIRONMENT,
_GenerateCategories(apps),
_GeneratePolicies(apps),
ADMX_FOOTER,
]
return ''.join(target_contents)
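# The returned document is the concatenation of the fixed header, the
# namespace/supportedOn environment, the per-app <categories> block, the
# <policies> block and the closing tag.  A minimal illustrative call:
#   admx_text = GenerateGroupPolicyTemplateAdmx(
#       [('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}', '', '')])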
ADML_HEADER = '''\
<policyDefinitionResources revision="1.0" schemaVersion="1.0">
'''
ADML_ENVIRONMENT = '''\
<displayName>
</displayName>
<description>
</description>
'''
ADML_DEFAULT_ROLLBACK_DISCLAIMER = (
    'This policy is meant to serve as a temporary measure when Enterprise '
'Administrators need to downgrade for business reasons. To ensure '
'users are protected by the latest security updates, the most recent '
'version should be used. When versions are downgraded to older '
'versions, there could be incompatibilities.')
ADML_DOMAIN_REQUIREMENT_EN = (
'This policy is available only on Windows instances that are joined to a '
'Microsoft® Active Directory® domain.')
ADML_PREDEFINED_STRINGS_TABLE_EN = [
('Sup_GoogleUpdate1_2_145_5', 'At least Google Update 1.2.145.5'),
('Sup_GoogleUpdate1_3_21_81', 'At least Google Update 1.3.21.81'),
('Sup_GoogleUpdate1_3_26_0', 'At least Google Update 1.3.26.0'),
('Sup_GoogleUpdate1_3_33_5', 'At least Google Update 1.3.33.5'),
('Sup_GoogleUpdate1_3_34_3', 'At least Google Update 1.3.34.3'),
('Cat_GoogleUpdate', 'Google Update'),
('Cat_Preferences', 'Preferences'),
('Cat_ProxyServer', 'Proxy Server'),
('Cat_Applications', 'Applications'),
('Pol_AutoUpdateCheckPeriod', 'Auto-update check period override'),
('Pol_UpdateCheckSuppressedPeriod',
'Time period in each day to suppress auto-update check'),
('Pol_DownloadPreference', 'Download URL class override'),
('DownloadPreference_DropDown', 'Cacheable download URLs'),
('Pol_ProxyMode', 'Choose how to specify proxy server settings'),
('Pol_ProxyServer', 'Address or URL of proxy server'),
('Pol_ProxyPacUrl', 'URL to a proxy .pac file'),
('Pol_DefaultAllowInstallation', 'Allow installation default'),
('Pol_AllowInstallation', 'Allow installation'),
('Pol_DefaultUpdatePolicy', 'Update policy override default'),
('Pol_UpdatePolicy', 'Update policy override'),
('Pol_TargetVersionPrefix', 'Target version prefix override'),
('Pol_RollbackToTargetVersion', 'Rollback to Target version'),
('Part_AutoUpdateCheckPeriod', 'Minutes between update checks'),
('Part_UpdateCheckSuppressedStartHour',
   'Hour in a day that starts to suppress update check'),
('Part_UpdateCheckSuppressedStartMin',
'Minute in hour that starts to suppress update check'),
('Part_UpdateCheckSuppressedDurationMin',
'Number of minutes to suppress update check each day'),
('Part_ProxyMode', 'Choose how to specify proxy server settings'),
('Part_ProxyServer', 'Address or URL of proxy server'),
('Part_ProxyPacUrl', 'URL to a proxy .pac file'),
('Part_InstallPolicy', 'Policy'),
('Name_InstallsEnabled', 'Always allow Installs (recommended)'),
('Name_InstallsEnabledMachineOnly',
'Always allow Machine-Wide Installs, but not Per-User Installs.'),
('Name_InstallsDisabled', 'Installs disabled'),
('Part_UpdatePolicy', 'Policy'),
('Part_TargetVersionPrefix', 'Target version prefix'),
('Name_UpdatesEnabled', 'Always allow updates (recommended)'),
('Name_ManualUpdatesOnly', 'Manual updates only'),
('Name_AutomaticUpdatesOnly', 'Automatic silent updates only'),
('Name_UpdatesDisabled', 'Updates disabled'),
('ProxyDisabled_DropDown', 'Never use a proxy'),
('ProxyAutoDetect_DropDown', 'Auto detect proxy settings'),
('ProxyPacScript_DropDown', 'Use a .pac proxy script'),
('ProxyFixedServers_DropDown', 'Use fixed proxy servers'),
('ProxyUseSystem_DropDown', 'Use system proxy settings'),
('Explain_GoogleUpdate',
'Policies to control the installation and updating of Google applications '
'that use Google Update/Google Installer.'),
('Explain_Preferences', 'General policies for Google Update.'),
('Explain_AutoUpdateCheckPeriod',
'Minimum number of minutes between automatic update checks.\n\n'
'Set the value to 0 if you want to disable all auto-update checks '
'(not recommended).\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DownloadPreference',
'If enabled, the Google Update server will attempt to provide '
'cache-friendly URLs for update payloads in its responses.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_UpdateCheckSuppressedPeriod',
'If this setting is enabled, update checks will be suppressed during '
'each day starting from Hour:Minute for a period of Duration (in minutes).'
' Duration does not account for daylight savings time. So for instance, '
'if the start time is 22:00, and with a duration of 480 minutes, the '
'updates will be suppressed for 8 hours regardless of whether daylight '
'savings time changes happen in between.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyMode',
'Allows you to specify the proxy server used by Google Update.\n\n'
'If you choose to never use a proxy server and always connect directly, '
'all other options are ignored.\n\n'
'If you choose to use system proxy settings or auto detect the proxy '
'server, all other options are ignored.\n\n'
'If you choose fixed server proxy mode, you can specify further options '
'in \'Address or URL of proxy server\'.\n\n'
'If you choose to use a .pac proxy script, you must specify the URL to '
'the script in \'URL to a proxy .pac file\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyServer',
'You can specify the URL of the proxy server here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyPacUrl',
'You can specify a URL to a proxy .pac file here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_Applications', 'Policies for individual applications.\n\n'
'An updated ADMX/ADML template will be required to support '
'Google applications released in the future.'),
('Explain_DefaultAllowInstallation',
'Specifies the default behavior for whether Google software can be '
'installed using Google Update/Google Installer.\n\n'
'Can be overridden by the "Allow installation" for individual '
'applications.\n\n'
'Only affects installation of Google software using Google Update/Google '
'Installer. Cannot prevent running the application installer directly or '
'installation of Google software that does not use Google Update/Google '
'Installer for installation.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DefaultUpdatePolicy',
'Specifies the default policy for software updates from Google.\n\n'
'Can be overridden by the "Update policy override" for individual '
'applications.\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, either '
'by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user does a '
'manual update check. (Not all apps provide an interface for this.)\n'
' - Automatic silent updates only: Updates are only applied when they are '
'found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for updates '
'using each application\'s manual update mechanism if available. If you '
'disable updates, you should periodically check for updates and '
'distribute them to users.\n\n'
'Only affects updates for Google software that uses Google Update for '
'updates. Does not prevent auto-updates of Google software that does not '
'use Google Update for updates.\n\n'
'Updates for Google Update are not affected by this setting; Google '
'Update will continue to update itself while it is installed.\n\n'
   'WARNING: Disabling updates will also prevent updates of any new Google '
'applications released in the future, possibly including dependencies for '
'future versions of installed applications.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
]
ADML_PRESENTATIONS = '''\
<presentation id="Pol_AutoUpdateCheckPeriod">
<decimalTextBox refId="Part_AutoUpdateCheckPeriod" defaultValue="1400"
spinStep="60">Minutes between update checks</decimalTextBox>
</presentation>
<presentation id="Pol_UpdateCheckSuppressedPeriod">
<decimalTextBox refId="Part_UpdateCheckSuppressedStartHour"
defaultValue="0" spinStep="1">Hour</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedStartMin"
defaultValue="0" spinStep="1">Minute</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedDurationMin"
defaultValue="60">Duration</decimalTextBox>
</presentation>
<presentation id="Pol_DownloadPreference">
<dropdownList refId="Part_DownloadPreference"
defaultItem="0">Type of download URL to request</dropdownList>
</presentation>
<presentation id="Pol_ProxyMode">
<dropdownList refId="Part_ProxyMode"
defaultItem="0">Choose how to specify proxy server settings
</dropdownList>
</presentation>
<presentation id="Pol_ProxyServer">
<textBox refId="Part_ProxyServer">
<label>Address or URL of proxy server</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_ProxyPacUrl">
<textBox refId="Part_ProxyPacUrl">
<label>URL to a proxy .pac file</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_DefaultAllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_DefaultUpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_AllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_UpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_TargetVersionPrefix">
<textBox refId="Part_TargetVersionPrefix">
<label>Target version prefix</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_RollbackToTargetVersion" />
'''
ADML_RESOURCE_TABLE_TEMPLATE = '''
<resources>
<stringTable>
%s
</stringTable>
<presentationTable>
%s
</presentationTable>
</resources>
'''
ADML_FOOTER = '</policyDefinitionResources>'
def GenerateGroupPolicyTemplateAdml(apps):
"""Generates a Group Policy template (ADML format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
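        * rollback disclaimer (may be an empty string; a default disclaimer is then used)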
Returns:
String containing the contents of the .ADML file.
"""
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if not rollback_disclaimer:
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = ('Cat_' + app_legal_id, app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (
'Explain_Install' + app_legal_id,
'Specifies whether %s can be installed using Google Update/Google '
'Installer.\n\n'
'If this policy is not configured, %s can be installed as specified '
'by "Allow installation default".\n\n'
'%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (
'Explain_AutoUpdate' + app_legal_id,
'Specifies how Google Update handles available %s updates '
'from Google.\n\n'
'If this policy is not configured, Google Update handles available '
'updates as specified by "Update policy override default".\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, '
'either by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user '
'does a manual update check. (Not all apps provide an interface '
' for this.)\n'
' - Automatic silent updates only: Updates are only applied when '
'they are found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for '
'updates using the application\'s manual update mechanism if '
'available. If you disable updates, you should periodically check '
'for updates and distribute them to users.%s\n\n'
'%s' %
(app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (
'Explain_TargetVersionPrefix' + app_legal_id,
'Specifies which version %s should be updated to.\n\n'
'When this policy is enabled, the app will be updated to the version '
'prefixed with this policy value.\n\nSome examples:\n'
'1) Not configured: app will be updated to the latest version '
'available.\n'
'2) Policy value is set to "55.": the app will be updated to any minor '
'version of 55 (e.g., 55.24.34 or 55.60.2).\n'
'3) Policy value is "55.2.": the app will be updated to any minor '
'version of 55.2 (e.g., 55.2.34 or 55.2.2).\n'
'4) Policy value is "55.24.34": the app will be updated to this '
'specific version only.\n\n'
'%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (
'Explain_RollbackToTargetVersion' + app_legal_id,
'Specifies that Google Update should roll installations of %s back to '
'the version indicated by "Target version prefix override".\n\n'
'This policy setting has no effect unless "Target version prefix '
'override" is set.\n\n'
'If this policy is not configured or is disabled, installs that have a '
'version higher than that specified by "Target version prefix '
'override" will be left as-is.\n\n'
'If this policy is enabled, installs that have a version higher than '
'that specified by "Target version prefix override" will be downgraded '
'to the highest available version that matches the target version.\n\n'
'%s\n\n'
'%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append(' <string id="%s">%s</string>' %
(entry[0], entry[1]))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE %
('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [
ADML_HEADER,
ADML_ENVIRONMENT,
app_resource_tables,
ADML_FOOTER,
]
return ''.join(target_contents)
def WriteGroupPolicyTemplateAdmx(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADMX template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
def WriteGroupPolicyTemplateAdml(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
    target_path: Output path of the .ADML template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.',
'Disclaimer'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.',
''),
]
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, 'test_gold.admx')
output_path = os.path.join(module_dir, 'test_out.admx')
WriteGroupPolicyTemplateAdmx(output_path, TEST_APPS)
admx_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not admx_files_equal:
print('FAIL: ADMX files are not equal.')
gold_path = os.path.join(module_dir, 'test_gold.adml')
output_path = os.path.join(module_dir, 'test_out.adml')
WriteGroupPolicyTemplateAdml(output_path, TEST_APPS)
adml_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not adml_files_equal:
print('FAIL: ADML files are not equal.')
if admx_files_equal and adml_files_equal:
print('SUCCESS. contents are equal')
else:
sys.exit(-1)
|
def _GeneratePolicies(apps):
"""Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
"""
app_policy_list = []
for app in apps:
app_name, app_guid, _, _ = app
app_policy_list.append(ADMX_APP_POLICY_TEMPLATE % {
'AppLegalId': _CreateLegalIdentifier(app_name),
'AppGuid': app_guid,
'RootPolicyKey': MAIN_POLICY_KEY,
})
return ADMX_POLICIES % {
'AppPolicyList': '\n'.join(app_policy_list),
'RootPolicyKey': MAIN_POLICY_KEY,
}
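# Illustrative expansion: for an app named 'Google Test Foo',
# ADMX_APP_POLICY_TEMPLATE yields policies named
# Pol_AllowInstallationGoogleTestFoo, Pol_UpdatePolicyGoogleTestFoo,
# Pol_TargetVersionPrefixGoogleTestFoo and
# Pol_RollbackToTargetVersionGoogleTestFoo, each keyed under MAIN_POLICY_KEY.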
| 411 | 434 |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy admx/adml template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
from __future__ import print_function
import codecs
import filecmp
import os
import re
import sys
MAIN_POLICY_KEY = r'Software\Policies\HuhiSoftware\Update'
ADMX_HEADER = '<policyDefinitions revision="1.0" schemaVersion="1.0">'
ADMX_ENVIRONMENT = '''
<policyNamespaces>
<target namespace="Google.Policies.Update" prefix="update"/>
<using namespace="Google.Policies" prefix="Google"/>
<using prefix="windows" namespace="Microsoft.Policies.Windows" />
</policyNamespaces>
<supersededAdm fileName="GoogleUpdate.adm" />
<resources minRequiredRevision="1.0" />
<supportedOn>
<definitions>
<definition name="Sup_GoogleUpdate1_2_145_5"
displayName="$(string.Sup_GoogleUpdate1_2_145_5)" />
<definition name="Sup_GoogleUpdate1_3_21_81"
displayName="$(string.Sup_GoogleUpdate1_3_21_81)" />
<definition name="Sup_GoogleUpdate1_3_26_0"
displayName="$(string.Sup_GoogleUpdate1_3_26_0)" />
<definition name="Sup_GoogleUpdate1_3_33_5"
displayName="$(string.Sup_GoogleUpdate1_3_33_5)" />
<definition name="Sup_GoogleUpdate1_3_34_3"
displayName="$(string.Sup_GoogleUpdate1_3_34_3)" />
</definitions>
</supportedOn>
'''
ADMX_CATEGORIES = r'''
<categories>
<category name="Cat_GoogleUpdate" displayName="$(string.Cat_GoogleUpdate)"
explainText="$(string.Explain_GoogleUpdate)">
<parentCategory ref="Google:Cat_Google" />
</category>
<category name="Cat_Preferences" displayName="$(string.Cat_Preferences)"
explainText="$(string.Explain_Preferences)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_ProxyServer" displayName="$(string.Cat_ProxyServer)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
<category name="Cat_Applications" displayName="$(string.Cat_Applications)"
explainText="$(string.Explain_Applications)">
<parentCategory ref="Cat_GoogleUpdate" />
</category>
%(AppCategorList)s
</categories>
'''
ADMX_POLICIES = r'''
<policies>
<policy name="Pol_AutoUpdateCheckPeriod" class="Machine"
displayName="$(string.Pol_AutoUpdateCheckPeriod)"
explainText="$(string.Explain_AutoUpdateCheckPeriod)"
presentation="$(presentation.Pol_AutoUpdateCheckPeriod)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<decimal id="Part_AutoUpdateCheckPeriod"
key="%(RootPolicyKey)s"
valueName="AutoUpdateCheckPeriodMinutes"
required="true" minValue="0" maxValue="43200" />
</elements>
</policy>
<policy name="Pol_DownloadPreference" class="Machine"
displayName="$(string.Pol_DownloadPreference)"
explainText="$(string.Explain_DownloadPreference)"
presentation="$(presentation.Pol_DownloadPreference)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_26_0" />
<elements>
<enum id="Part_DownloadPreference" key="%(RootPolicyKey)s"
valueName="DownloadPreference">
<item displayName="$(string.DownloadPreference_DropDown)">
<value>
<string>cacheable</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdateCheckSuppressedPeriod" class="Machine"
displayName="$(string.Pol_UpdateCheckSuppressedPeriod)"
explainText="$(string.Explain_UpdateCheckSuppressedPeriod)"
presentation="$(presentation.Pol_UpdateCheckSuppressedPeriod)"
key="Software\Policies\Google\Update">
<parentCategory ref="Cat_Preferences" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<decimal id="Part_UpdateCheckSuppressedStartHour"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartHour"
required="true" minValue="0" maxValue="23" />
<decimal id="Part_UpdateCheckSuppressedStartMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedStartMin"
required="true" minValue="0" maxValue="59" />
<decimal id="Part_UpdateCheckSuppressedDurationMin"
key="Software\Policies\Google\Update"
valueName="UpdatesSuppressedDurationMin"
required="true" minValue="1" maxValue="960" />
</elements>
</policy>
<policy name="Pol_ProxyMode" class="Machine"
displayName="$(string.Pol_ProxyMode)"
explainText="$(string.Explain_ProxyMode)"
presentation="$(presentation.Pol_ProxyMode)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<enum id="Part_ProxyMode" key="%(RootPolicyKey)s"
valueName="ProxyMode">
<item displayName="$(string.ProxyDisabled_DropDown)">
<value>
<string>direct</string>
</value>
</item>
<item displayName="$(string.ProxyAutoDetect_DropDown)">
<value>
<string>auto_detect</string>
</value>
</item>
<item displayName="$(string.ProxyPacScript_DropDown)">
<value>
<string>pac_script</string>
</value>
</item>
<item displayName="$(string.ProxyFixedServers_DropDown)">
<value>
<string>fixed_servers</string>
</value>
</item>
<item displayName="$(string.ProxyUseSystem_DropDown)">
<value>
<string>system</string>
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_ProxyServer" class="Machine"
displayName="$(string.Pol_ProxyServer)"
explainText="$(string.Explain_ProxyServer)"
presentation="$(presentation.Pol_ProxyServer)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyServer" valueName="ProxyServer" />
</elements>
</policy>
<policy name="Pol_ProxyPacUrl" class="Machine"
displayName="$(string.Pol_ProxyPacUrl)"
explainText="$(string.Explain_ProxyPacUrl)"
presentation="$(presentation.Pol_ProxyPacUrl)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_ProxyServer" />
<supportedOn ref="Sup_GoogleUpdate1_3_21_81" />
<elements>
<text id="Part_ProxyPacUrl" valueName="ProxyPacUrl" />
</elements>
</policy>
<policy name="Pol_DefaultAllowInstallation" class="Machine"
displayName="$(string.Pol_DefaultAllowInstallation)"
explainText="$(string.Explain_DefaultAllowInstallation)"
presentation="$(presentation.Pol_DefaultAllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy" key="%(RootPolicyKey)s"
valueName="InstallDefault" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_DefaultUpdatePolicy" class="Machine"
displayName="$(string.Pol_DefaultUpdatePolicy)"
explainText="$(string.Explain_DefaultUpdatePolicy)"
presentation="$(presentation.Pol_DefaultUpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_Applications" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy" key="%(RootPolicyKey)s"
valueName="UpdateDefault" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
%(AppPolicyList)s
</policies>
'''
ADMX_APP_POLICY_TEMPLATE = '''\
<policy name="Pol_AllowInstallation%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_AllowInstallation)"
explainText="$(string.Explain_Install%(AppLegalId)s)"
presentation="$(presentation.Pol_AllowInstallation)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_InstallPolicy"
valueName="Install%(AppGuid)s" required="true">
<item displayName="$(string.Name_InstallsEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_InstallsEnabledMachineOnly)">
<value>
<decimal value="4" />
</value>
</item>
<item displayName="$(string.Name_InstallsDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_UpdatePolicy%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_UpdatePolicy)"
explainText="$(string.Explain_AutoUpdate%(AppLegalId)s)"
presentation="$(presentation.Pol_UpdatePolicy)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_2_145_5" />
<elements>
<enum id="Part_UpdatePolicy"
valueName="Update%(AppGuid)s" required="true">
<item displayName="$(string.Name_UpdatesEnabled)">
<value>
<decimal value="1" />
</value>
</item>
<item displayName="$(string.Name_ManualUpdatesOnly)">
<value>
<decimal value="2" />
</value>
</item>
<item displayName="$(string.Name_AutomaticUpdatesOnly)">
<value>
<decimal value="3" />
</value>
</item>
<item displayName="$(string.Name_UpdatesDisabled)">
<value>
<decimal value="0" />
</value>
</item>
</enum>
</elements>
</policy>
<policy name="Pol_TargetVersionPrefix%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_TargetVersionPrefix)"
explainText="$(string.Explain_TargetVersionPrefix%(AppLegalId)s)"
presentation="$(presentation.Pol_TargetVersionPrefix)"
key="%(RootPolicyKey)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_33_5" />
<elements>
<text id="Part_TargetVersionPrefix"
valueName="TargetVersionPrefix%(AppGuid)s" />
</elements>
</policy>
<policy name="Pol_RollbackToTargetVersion%(AppLegalId)s" class="Machine"
displayName="$(string.Pol_RollbackToTargetVersion)"
explainText="$(string.Explain_RollbackToTargetVersion%(AppLegalId)s)"
presentation="$(presentation.Pol_RollbackToTargetVersion)"
key="%(RootPolicyKey)s"
valueName="RollbackToTargetVersion%(AppGuid)s">
<parentCategory ref="Cat_%(AppLegalId)s" />
<supportedOn ref="Sup_GoogleUpdate1_3_34_3" />
<enabledValue><decimal value="1" /></enabledValue>
<disabledValue><decimal value="0" /></disabledValue>
</policy>'''
ADMX_FOOTER = '</policyDefinitions>'
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADMX/ADML files.
  Strips characters from input_string, including some that would not
  necessarily cause problems, and may not handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
return re.sub(r'[\W_]', '', input_string)
def GenerateGroupPolicyTemplateAdmx(apps):
"""Generates a Group Policy template (ADMX format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADMX file.
"""
def _GenerateCategories(apps):
"""Generates category string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the category string for each app
in apps, each populated with the appropriate app-specific strings.
"""
admx_app_category_template = (
' <category name="Cat_%(AppLegalId)s"\n'
' displayName="$(string.Cat_%(AppLegalId)s)">\n'
' <parentCategory ref="Cat_Applications" />\n'
' </category>')
app_category_list = []
for app in apps:
app_name = app[0]
app_category_list.append(admx_app_category_template % {
'AppLegalId': _CreateLegalIdentifier(app_name)
})
return ADMX_CATEGORIES % {'AppCategorList': '\n'.join(app_category_list)}
def _GeneratePolicies(apps):
"""Generates policy string for each of the specified apps.
Args:
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the policy template for each app
in apps, each populated with the appropriate app-specific strings.
"""
app_policy_list = []
for app in apps:
app_name, app_guid, _, _ = app
app_policy_list.append(ADMX_APP_POLICY_TEMPLATE % {
'AppLegalId': _CreateLegalIdentifier(app_name),
'AppGuid': app_guid,
'RootPolicyKey': MAIN_POLICY_KEY,
})
return ADMX_POLICIES % {
'AppPolicyList': '\n'.join(app_policy_list),
'RootPolicyKey': MAIN_POLICY_KEY,
}
target_contents = [
ADMX_HEADER,
ADMX_ENVIRONMENT,
_GenerateCategories(apps),
_GeneratePolicies(apps),
ADMX_FOOTER,
]
return ''.join(target_contents)
ADML_HEADER = '''\
<policyDefinitionResources revision="1.0" schemaVersion="1.0">
'''
ADML_ENVIRONMENT = '''\
<displayName>
</displayName>
<description>
</description>
'''
ADML_DEFAULT_ROLLBACK_DISCLAIMER = (
    'This policy is meant to serve as a temporary measure when Enterprise '
'Administrators need to downgrade for business reasons. To ensure '
'users are protected by the latest security updates, the most recent '
'version should be used. When versions are downgraded to older '
'versions, there could be incompatibilities.')
ADML_DOMAIN_REQUIREMENT_EN = (
'This policy is available only on Windows instances that are joined to a '
'Microsoft® Active Directory® domain.')
ADML_PREDEFINED_STRINGS_TABLE_EN = [
('Sup_GoogleUpdate1_2_145_5', 'At least Google Update 1.2.145.5'),
('Sup_GoogleUpdate1_3_21_81', 'At least Google Update 1.3.21.81'),
('Sup_GoogleUpdate1_3_26_0', 'At least Google Update 1.3.26.0'),
('Sup_GoogleUpdate1_3_33_5', 'At least Google Update 1.3.33.5'),
('Sup_GoogleUpdate1_3_34_3', 'At least Google Update 1.3.34.3'),
('Cat_GoogleUpdate', 'Google Update'),
('Cat_Preferences', 'Preferences'),
('Cat_ProxyServer', 'Proxy Server'),
('Cat_Applications', 'Applications'),
('Pol_AutoUpdateCheckPeriod', 'Auto-update check period override'),
('Pol_UpdateCheckSuppressedPeriod',
'Time period in each day to suppress auto-update check'),
('Pol_DownloadPreference', 'Download URL class override'),
('DownloadPreference_DropDown', 'Cacheable download URLs'),
('Pol_ProxyMode', 'Choose how to specify proxy server settings'),
('Pol_ProxyServer', 'Address or URL of proxy server'),
('Pol_ProxyPacUrl', 'URL to a proxy .pac file'),
('Pol_DefaultAllowInstallation', 'Allow installation default'),
('Pol_AllowInstallation', 'Allow installation'),
('Pol_DefaultUpdatePolicy', 'Update policy override default'),
('Pol_UpdatePolicy', 'Update policy override'),
('Pol_TargetVersionPrefix', 'Target version prefix override'),
('Pol_RollbackToTargetVersion', 'Rollback to Target version'),
('Part_AutoUpdateCheckPeriod', 'Minutes between update checks'),
('Part_UpdateCheckSuppressedStartHour',
'Hour in a day that starts to suppress update check'),
('Part_UpdateCheckSuppressedStartMin',
'Minute in hour that starts to suppress update check'),
('Part_UpdateCheckSuppressedDurationMin',
'Number of minutes to suppress update check each day'),
('Part_ProxyMode', 'Choose how to specify proxy server settings'),
('Part_ProxyServer', 'Address or URL of proxy server'),
('Part_ProxyPacUrl', 'URL to a proxy .pac file'),
('Part_InstallPolicy', 'Policy'),
('Name_InstallsEnabled', 'Always allow Installs (recommended)'),
('Name_InstallsEnabledMachineOnly',
'Always allow Machine-Wide Installs, but not Per-User Installs.'),
('Name_InstallsDisabled', 'Installs disabled'),
('Part_UpdatePolicy', 'Policy'),
('Part_TargetVersionPrefix', 'Target version prefix'),
('Name_UpdatesEnabled', 'Always allow updates (recommended)'),
('Name_ManualUpdatesOnly', 'Manual updates only'),
('Name_AutomaticUpdatesOnly', 'Automatic silent updates only'),
('Name_UpdatesDisabled', 'Updates disabled'),
('ProxyDisabled_DropDown', 'Never use a proxy'),
('ProxyAutoDetect_DropDown', 'Auto detect proxy settings'),
('ProxyPacScript_DropDown', 'Use a .pac proxy script'),
('ProxyFixedServers_DropDown', 'Use fixed proxy servers'),
('ProxyUseSystem_DropDown', 'Use system proxy settings'),
('Explain_GoogleUpdate',
'Policies to control the installation and updating of Google applications '
'that use Google Update/Google Installer.'),
('Explain_Preferences', 'General policies for Google Update.'),
('Explain_AutoUpdateCheckPeriod',
'Minimum number of minutes between automatic update checks.\n\n'
'Set the value to 0 if you want to disable all auto-update checks '
'(not recommended).\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DownloadPreference',
'If enabled, the Google Update server will attempt to provide '
'cache-friendly URLs for update payloads in its responses.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_UpdateCheckSuppressedPeriod',
'If this setting is enabled, update checks will be suppressed during '
'each day starting from Hour:Minute for a period of Duration (in minutes).'
' Duration does not account for daylight saving time. For instance, '
'if the start time is 22:00 and the duration is 480 minutes, '
'updates will be suppressed for 8 hours regardless of whether a daylight '
'saving time change happens in between.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyMode',
'Allows you to specify the proxy server used by Google Update.\n\n'
'If you choose to never use a proxy server and always connect directly, '
'all other options are ignored.\n\n'
'If you choose to use system proxy settings or auto detect the proxy '
'server, all other options are ignored.\n\n'
'If you choose fixed server proxy mode, you can specify further options '
'in \'Address or URL of proxy server\'.\n\n'
'If you choose to use a .pac proxy script, you must specify the URL to '
'the script in \'URL to a proxy .pac file\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyServer',
'You can specify the URL of the proxy server here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_ProxyPacUrl',
'You can specify a URL to a proxy .pac file here.\n\n'
'This policy only takes effect if you have selected manual proxy settings '
'at \'Choose how to specify proxy server settings\'.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_Applications', 'Policies for individual applications.\n\n'
'An updated ADMX/ADML template will be required to support '
'Google applications released in the future.'),
('Explain_DefaultAllowInstallation',
'Specifies the default behavior for whether Google software can be '
'installed using Google Update/Google Installer.\n\n'
'Can be overridden by the "Allow installation" for individual '
'applications.\n\n'
'Only affects installation of Google software using Google Update/Google '
'Installer. Cannot prevent running the application installer directly or '
'installation of Google software that does not use Google Update/Google '
'Installer for installation.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
('Explain_DefaultUpdatePolicy',
'Specifies the default policy for software updates from Google.\n\n'
'Can be overridden by the "Update policy override" for individual '
'applications.\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, either '
'by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user does a '
'manual update check. (Not all apps provide an interface for this.)\n'
' - Automatic silent updates only: Updates are only applied when they are '
'found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for updates '
'using each application\'s manual update mechanism if available. If you '
'disable updates, you should periodically check for updates and '
'distribute them to users.\n\n'
'Only affects updates for Google software that uses Google Update for '
'updates. Does not prevent auto-updates of Google software that does not '
'use Google Update for updates.\n\n'
'Updates for Google Update are not affected by this setting; Google '
'Update will continue to update itself while it is installed.\n\n'
'WARNING: Disabling updates will also prevent updates of any new Google '
'applications released in the future, possibly including dependencies for '
'future versions of installed applications.\n\n'
'%s' % ADML_DOMAIN_REQUIREMENT_EN),
]
ADML_PRESENTATIONS = '''\
<presentation id="Pol_AutoUpdateCheckPeriod">
<decimalTextBox refId="Part_AutoUpdateCheckPeriod" defaultValue="1400"
spinStep="60">Minutes between update checks</decimalTextBox>
</presentation>
<presentation id="Pol_UpdateCheckSuppressedPeriod">
<decimalTextBox refId="Part_UpdateCheckSuppressedStartHour"
defaultValue="0" spinStep="1">Hour</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedStartMin"
defaultValue="0" spinStep="1">Minute</decimalTextBox>
<decimalTextBox refId="Part_UpdateCheckSuppressedDurationMin"
defaultValue="60">Duration</decimalTextBox>
</presentation>
<presentation id="Pol_DownloadPreference">
<dropdownList refId="Part_DownloadPreference"
defaultItem="0">Type of download URL to request</dropdownList>
</presentation>
<presentation id="Pol_ProxyMode">
<dropdownList refId="Part_ProxyMode"
defaultItem="0">Choose how to specify proxy server settings
</dropdownList>
</presentation>
<presentation id="Pol_ProxyServer">
<textBox refId="Part_ProxyServer">
<label>Address or URL of proxy server</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_ProxyPacUrl">
<textBox refId="Part_ProxyPacUrl">
<label>URL to a proxy .pac file</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_DefaultAllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_DefaultUpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_AllowInstallation">
<dropdownList refId="Part_InstallPolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_UpdatePolicy">
<dropdownList refId="Part_UpdatePolicy"
defaultItem="0">Policy</dropdownList>
</presentation>
<presentation id="Pol_TargetVersionPrefix">
<textBox refId="Part_TargetVersionPrefix">
<label>Target version prefix</label>
<defaultValue></defaultValue>
</textBox>
</presentation>
<presentation id="Pol_RollbackToTargetVersion" />
'''
ADML_RESOURCE_TABLE_TEMPLATE = '''
<resources>
<stringTable>
%s
</stringTable>
<presentationTable>
%s
</presentationTable>
</resources>
'''
ADML_FOOTER = '</policyDefinitionResources>'
def GenerateGroupPolicyTemplateAdml(apps):
"""Generates a Group Policy template (ADML format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
Returns:
String containing the contents of the .ADML file.
"""
string_definition_list = ADML_PREDEFINED_STRINGS_TABLE_EN[:]
for app in apps:
app_name = app[0]
app_legal_id = _CreateLegalIdentifier(app_name)
app_additional_help_msg = app[2]
rollback_disclaimer = app[3]
if not rollback_disclaimer:
rollback_disclaimer = ADML_DEFAULT_ROLLBACK_DISCLAIMER
app_category = ('Cat_' + app_legal_id, app_name)
string_definition_list.append(app_category)
app_install_policy_explanation = (
'Explain_Install' + app_legal_id,
'Specifies whether %s can be installed using Google Update/Google '
'Installer.\n\n'
'If this policy is not configured, %s can be installed as specified '
'by "Allow installation default".\n\n'
'%s' % (app_name, app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_install_policy_explanation)
app_auto_update_policy_explanation = (
'Explain_AutoUpdate' + app_legal_id,
'Specifies how Google Update handles available %s updates '
'from Google.\n\n'
'If this policy is not configured, Google Update handles available '
'updates as specified by "Update policy override default".\n\n'
'Options:\n'
' - Always allow updates: Updates are always applied when found, '
'either by periodic update check or by a manual update check.\n'
' - Manual updates only: Updates are only applied when the user '
'does a manual update check. (Not all apps provide an interface '
'for this.)\n'
' - Automatic silent updates only: Updates are only applied when '
'they are found via the periodic update check.\n'
' - Updates disabled: Never apply updates.\n\n'
'If you select manual updates, you should periodically check for '
'updates using the application\'s manual update mechanism if '
'available. If you disable updates, you should periodically check '
'for updates and distribute them to users.%s\n\n'
'%s' %
(app_name, app_additional_help_msg, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_auto_update_policy_explanation)
app_target_version_prefix_explanation = (
'Explain_TargetVersionPrefix' + app_legal_id,
'Specifies which version %s should be updated to.\n\n'
'When this policy is enabled, the app will be updated to the version '
'prefixed with this policy value.\n\nSome examples:\n'
'1) Not configured: app will be updated to the latest version '
'available.\n'
'2) Policy value is set to "55.": the app will be updated to any minor '
'version of 55 (e.g., 55.24.34 or 55.60.2).\n'
'3) Policy value is "55.2.": the app will be updated to any minor '
'version of 55.2 (e.g., 55.2.34 or 55.2.2).\n'
'4) Policy value is "55.24.34": the app will be updated to this '
'specific version only.\n\n'
'%s' % (app_name, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_target_version_prefix_explanation)
app_rollback_to_target_version_explanation = (
'Explain_RollbackToTargetVersion' + app_legal_id,
'Specifies that Google Update should roll installations of %s back to '
'the version indicated by "Target version prefix override".\n\n'
'This policy setting has no effect unless "Target version prefix '
'override" is set.\n\n'
'If this policy is not configured or is disabled, installs that have a '
'version higher than that specified by "Target version prefix '
'override" will be left as-is.\n\n'
'If this policy is enabled, installs that have a version higher than '
'that specified by "Target version prefix override" will be downgraded '
'to the highest available version that matches the target version.\n\n'
'%s\n\n'
'%s' % (app_name, rollback_disclaimer, ADML_DOMAIN_REQUIREMENT_EN))
string_definition_list.append(app_rollback_to_target_version_explanation)
app_resource_strings = []
for entry in string_definition_list:
app_resource_strings.append(' <string id="%s">%s</string>' %
(entry[0], entry[1]))
app_resource_tables = (ADML_RESOURCE_TABLE_TEMPLATE %
('\n'.join(app_resource_strings), ADML_PRESENTATIONS))
target_contents = [
ADML_HEADER,
ADML_ENVIRONMENT,
app_resource_tables,
ADML_FOOTER,
]
return ''.join(target_contents)
def WriteGroupPolicyTemplateAdmx(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdmx(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
def WriteGroupPolicyTemplateAdml(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line.
"""
contents = GenerateGroupPolicyTemplateAdml(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo', '{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.',
'Disclaimer'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.',
''),
]
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, 'test_gold.admx')
output_path = os.path.join(module_dir, 'test_out.admx')
WriteGroupPolicyTemplateAdmx(output_path, TEST_APPS)
admx_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not admx_files_equal:
print('FAIL: ADMX files are not equal.')
gold_path = os.path.join(module_dir, 'test_gold.adml')
output_path = os.path.join(module_dir, 'test_out.adml')
WriteGroupPolicyTemplateAdml(output_path, TEST_APPS)
adml_files_equal = filecmp.cmp(gold_path, output_path, shallow=False)
if not adml_files_equal:
print('FAIL: ADML files are not equal.')
if admx_files_equal and adml_files_equal:
print('SUCCESS. Contents are equal.')
else:
sys.exit(-1)
|
GetLoss
|
Compute loss and also deriv w.r.t. it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
|
from layer import *
class LogisticLayer(Layer):
def __init__(self, *args, **kwargs):
super(LogisticLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == deepnet_pb2.Hyperparams.LOGISTIC
def ApplyActivation(self):
cm.sigmoid(self.state)
def Sample(self):
self.state.sample_bernoulli(target=self.sample)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
self.deriv.apply_logistic_deriv(self.state)
# MASKED: GetLoss function (lines 21-62)
def GetSparsityDivisor(self):
self.means_temp2.assign(1)
self.means_temp2.subtract(self.means)
self.means_temp2.mult(self.means)
return self.means_temp2
|
def GetLoss(self, get_deriv=False, acc_deriv=False, **kwargs):
"""Compute loss and also deriv w.r.t to it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
if self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY:
data = self.data
state = self.state
temp1 = self.statesize
cm.cross_entropy_bernoulli(data, state, target=temp1, tiny=self.tiny)
perf.cross_entropy = temp1.sum()
cm.correct_preds(data, state, target=temp1, cutoff=0.5)
perf.correct_preds = temp1.sum()
if get_deriv:
self.state.subtract(self.data, target=self.deriv)
elif self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
target = self.statesize
self.state.subtract(self.data, target=target)
error = target.euclid_norm()**2
perf.error = error
if acc_deriv:
self.deriv.add_mult(target, alpha=self.loss_weight)
else:
self.deriv.assign(target)
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for logistic units.')
return perf
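# A minimal NumPy sketch (not part of the original file) of what the cudamat
# calls above are assumed to compute in the CROSS_ENTROPY branch: the Bernoulli
# cross-entropy between targets and sigmoid predictions, whose derivative with
# respect to the layer input reduces to (state - data) for logistic units.
import numpy as np

def bernoulli_cross_entropy(data, state, tiny=1e-10):
  """Return (loss, deriv) for targets `data` and sigmoid outputs `state`."""
  loss = -np.sum(data * np.log(state + tiny) +
                 (1 - data) * np.log(1 - state + tiny))
  deriv = state - data  # gradient w.r.t. the pre-sigmoid input
  return loss, deriv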
| 21 | 62 |
from layer import *
class LogisticLayer(Layer):
def __init__(self, *args, **kwargs):
super(LogisticLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == deepnet_pb2.Hyperparams.LOGISTIC
def ApplyActivation(self):
cm.sigmoid(self.state)
def Sample(self):
self.state.sample_bernoulli(target=self.sample)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
self.deriv.apply_logistic_deriv(self.state)
def GetLoss(self, get_deriv=False, acc_deriv=False, **kwargs):
"""Compute loss and also deriv w.r.t to it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
if self.loss_function == deepnet_pb2.Layer.CROSS_ENTROPY:
data = self.data
state = self.state
temp1 = self.statesize
cm.cross_entropy_bernoulli(data, state, target=temp1, tiny=self.tiny)
perf.cross_entropy = temp1.sum()
cm.correct_preds(data, state, target=temp1, cutoff=0.5)
perf.correct_preds = temp1.sum()
if get_deriv:
self.state.subtract(self.data, target=self.deriv)
elif self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
target = self.statesize
self.state.subtract(self.data, target=target)
error = target.euclid_norm()**2
perf.error = error
if acc_deriv:
self.deriv.add_mult(target, alpha=self.loss_weight)
else:
self.deriv.assign(target)
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for logistic units.')
return perf
def GetSparsityDivisor(self):
self.means_temp2.assign(1)
self.means_temp2.subtract(self.means)
self.means_temp2.mult(self.means)
return self.means_temp2
|
__str__
|
[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
# MASKED: __str__ function (lines 18-49)
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
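# Sketch (not part of the graded file): the closed-form n-ball volume that the
# recursive integral above is trying to compute, using the Gamma function.
from math import gamma, pi

def ball_volume(radius, dimension):
    """Volume of a ball of the given radius in `dimension` dimensions."""
    return pi ** (dimension / 2) * radius ** dimension / gamma(dimension / 2 + 1)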
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
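# Sketch (not part of the graded file): the f-string / direct-iteration version
# hinted at by the reviewer comments above, written as a standalone helper.
def box_window_repr(bounds):
    """Return the same "BoxWindow: [a, b] x ..." string from (a, b) pairs."""
    return "BoxWindow: " + " x ".join(f"[{a}, {b}]" for a, b in bounds)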
| 18 | 49 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
__contains__
|
[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
# MASKED: __contains__ function (lines 59-77)
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
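# Sketch (not part of the graded file): the NumPy-vectorized membership test
# suggested by the comments above; equivalent to the loop for a point whose
# length matches the box dimension.
import numpy as np

def box_window_contains(bounds, point):
    """Return True if `point` lies inside every [a, b] interval of `bounds`."""
    bounds, point = np.asarray(bounds), np.asarray(point)
    return bool(np.all((bounds[:, 0] <= point) & (point <= bounds[:, 1])))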
| 59 | 77 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
center
|
[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
# MASKED: center function (lines 115-126)
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
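# Sketch (not part of the graded file): the one-liner the comments above point
# towards -- averaging each [a, b] pair along axis 1 gives the center.
import numpy as np

def box_window_center(bounds):
    """Return the center of the box, one coordinate per dimension."""
    return np.mean(np.asarray(bounds), axis=1)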
| 115 | 126 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
[int: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
L = np.ones((n, self.__len__())) # liste des points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
flag = True
if len(args) != self.dim:
return False
else:
if np.linalg.norm(args - self.center) <= self.rad:
flag = True
return flag
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
# * iteresting recursive try
# todo test the method
# * exploit numpy vectors, use - or np.diff, and np.prod
v = 1
for i in range(self.dimension()):
integ = lambda x: v * np.sqrt(self.rad ** 2 - x ** 2)
v = integrate.quad(integ, -self.rad, self.rad) # ! integrate is not defined
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
|
rand
|
[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
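As the inline comments in the code suggest, the per-coordinate loop can be replaced by a single vectorized draw. A minimal standalone sketch under that assumption, using numpy's default generator in place of the lab's get_random_number_generator helper:

import numpy as np

def box_window_rand(bounds, n=1, rng=None):
    """Draw n points uniformly inside the box described by `bounds` ((a, b) rows)."""
    rng = np.random.default_rng(rng)
    bounds = np.asarray(bounds)
    return rng.uniform(bounds[:, 0], bounds[:, 1], size=(n, bounds.shape[0]))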
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
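    # --- Added sketch, not part of the original lab code -------------------
    # The review comments above suggest a vectorised membership test; a minimal
    # sketch using numpy broadcasting, assuming ``point`` is array-like of
    # length len(self):
    def contains_vectorized(self, point):
        """Vectorised equivalent of __contains__ based on numpy comparisons."""
        point = np.asarray(point)
        if point.shape != (len(self),):
            return False
        return bool(np.all((self.bounds[:, 0] <= point) & (point <= self.bounds[:, 1])))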
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
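    # --- Added sketch, not part of the original lab code -------------------
    # As the comment above suggests, the volume is just the product of the side
    # lengths; a one-liner with np.diff and np.prod:
    def volume_vectorized(self):
        """Product of the side lengths, computed with numpy."""
        return float(np.prod(np.abs(np.diff(self.bounds, axis=1))))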
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
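    # --- Added sketch, not part of the original lab code -------------------
    # As the comment above suggests, the per-axis mean of the bounds gives the
    # same center vector in a single call:
    def center_vectorized(self):
        """Center of the box as the per-axis mean of the bounds."""
        return np.mean(self.bounds, axis=1)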
# MASKED: rand function (lines 128-149)
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
| 128 | 149 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
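    # --- Added sketch, not part of the original lab code -------------------
    # The comment above suggests vectorising the sampling with
    # rng.uniform(a, b, size=...); a minimal sketch, assuming ``rng`` behaves
    # like a numpy Generator (as returned by get_random_number_generator):
    def rand_vectorized(self, n=1, rng=None):
        """Draw ``n`` points uniformly in the box with one rng.uniform call."""
        rng = get_random_number_generator(rng)
        low, high = self.bounds[:, 0], self.bounds[:, 1]
        # low and high broadcast against size=(n, dim), one column per axis
        return rng.uniform(low, high, size=(n, len(self)))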
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
__init__
|
[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
# MASKED: __init__ function (lines 171-179)
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
| 171 | 179 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
__contains__
|
[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
# MASKED: __contains__ function (lines 181-198)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
| 181 | 198 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
center
|
[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
|
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
# MASKED: center function (lines 236-248)
|
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
| 236 | 248 |
import numpy as np
from lab2.utils import get_random_number_generator
# todo clean up the docstrings
class BoxWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, args):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.bounds = args
def __str__(self):
"""[summary] BoxWindow: :math:`[a_1, b_1] \times [a_2, b_2] \times \cdots`
Returns:
[str]: [description of the Box's bounds]
"""
shape = (self.bounds).shape
representation = "BoxWindow: "
# * consider for a, b in self.bounds
# ! use f-strings
for i in range(shape[0] - 1): # ? why not self.dimension()
representation = (
representation
+ "["
+ str((self.bounds)[i][0])
+ ", "
+ str((self.bounds)[i][1])
+ "]"
+ " x "
)
representation = (
representation
+ "["
+ str((self.bounds)[shape[0] - 1][0])
+ ", "
+ str((self.bounds)[shape[0] - 1][1])
+ "]"
)
return representation
def __len__(self):
"""[summary]
Returns:
            [int]: [the dimension of the box]
"""
return ((self.bounds).shape)[0] # * no need to use ()
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the box
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# * consider for (a, b), x in zip(self.bounds, point)
# * or exploit numpy vectorization power
flag = True
for i in range(self.__len__()): # ? use len(self) of self.dimension
if args[i] >= self.bounds[i][0] and args[i] <= self.bounds[i][1]:
flag = True
else:
return False
return flag # ? flag is never modified and always returns True
# todo write tests
def dimension(self):
"""[summary]
This method is similar to the method __len__ described above
"""
return self.__len__() # ? why not using use len(self)
# todo write tests
def volume(self):
"""[summary]
This method calculates the volume of the Box
"""
v = 1
# * exploit numpy vectors, use - or np.diff, and np.prod
for i in range(self.dimension()):
# * use *= operator
v = v * abs((self.bounds[i][1] - self.bounds[i][0]))
return v
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the box , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the box
Returns:
[numpy array list]: [the center of the box]
"""
# * Nice try!
# ? how about np.mean(self.bounds)
c = np.zeros(self.__len__())
for i in range(self.__len__()):
c[i] = np.mean(self.bounds[i])
return c
def rand(self, n=1, rng=None):
"""[summary]
Generate ``n`` points uniformly at random inside the :py:class:`BoxWindow`.
Args:
n (int, optional): [description]. Defaults to 1.
rng ([type], optional): [description]. Defaults to None.
Returns:
Randomly n elements that belong to the box
"""
rng = get_random_number_generator(rng)
# ? readability why not using self.dimension()
        L = np.ones((n, self.__len__()))  # list of the points
# * exploit numpy, rng.uniform(a, b, size=n)
for i in range(n):
for j in range(self.__len__()):
x = rng.random()
L[i][j] = (1 - x) * self.bounds[j][0] + x * self.bounds[j][1]
return L
class UnitBoxWindow(BoxWindow):
def __init__(self, center, dimension):
"""[summary]a subclass of BoxWindow,represents the notion of "unit square box"
Args:
dimension ([int]): [dimension of the Unit Box]
center ([numpy array list], optional): [center of the Box].
"""
# * exploit numpy vectors, use - or np.diff, and +
self.bounds = np.array(
[[center[i] - 0.5, center[i] + 0.5] for i in range(dimension)]
)
super().__init__(self.bounds) # * Nice call to super
# todo write tests
class BallWindow:
"""[summary]BoxWindow class representing a virtual n-dimensional bounded Box"""
def __init__(self, center, radius, dimension):
"""[summary]Initialization of Box's parameters
Args:
args ([numpy array list]): [this argument represents the bounds of the box]
"""
self.dim = dimension
self.rad = radius
self.cent = center
def __contains__(self, args):
"""[summary]This method tests if an element (args) is inside the ball
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# * same remarks as in BoxWindow.__contains__
        if len(args) != self.dim:
            return False
        # the center is stored in the attribute self.cent (self.center is a method),
        # and the comparison itself is the membership test, so return it directly
        return bool(np.linalg.norm(np.asarray(args) - np.asarray(self.cent)) <= self.rad)
def dimension(self):
"""[summary]
This method gives the dimension of the ball
"""
return self.dim
def volume(self):
r"""[summary]
This method calculates the volume of the Ball using the formula :math:` V_{n+1} =\int_{-r}^{r}V_{n}(\sqrt{r^2 -x^2})dx`
"""
        # * interesting recursive try
        # todo test the method
        # * exploit numpy vectors, use - or np.diff, and np.prod
        from scipy import integrate  # integrate was used below without being imported
        k = 1.0  # V_0 = 1: volume of the zero-dimensional ball
        for n in range(self.dimension()):
            # V_{n+1}(r) = \int_{-r}^{r} V_n(\sqrt{r^2 - x^2}) dx with V_n(rho) = k * rho^n,
            # so after substituting x = r * t only the constant k needs updating
            factor, _ = integrate.quad(lambda t, n=n: (1.0 - t ** 2) ** (n / 2.0), -1.0, 1.0)
            k *= factor
        return k * self.rad ** self.dimension()
def indicator_function(self, args):
"""[summary]
This method is similar to the method __contains__ described above
Args:
args ([numpy array list]): [the element to test]
Returns:
[bool]: [True if the element is inside the ball , False if not]
"""
# ? isn't it equivalent to return args in self
if self.__contains__(args):
return True
else:
return False
def center(self):
"""[summary] determinate the center of the ball
Returns:
[numpy array list]: [the center of the ball]
"""
# * interesting try
# * exploit numpy vectorization power
# ? how about np.mean(self.bounds)
        # the ball stores its center directly; there is no bounds attribute to average
        return np.asarray(self.cent)
|
_get_bbox_regression_labels
|
Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
|
# --------------------------------------------------------
# Adapted from Faster R-CNN (https://github.com/rbgirshick/py-faster-rcnn)
# Written by Danfei Xu
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
#from datasets.viz import viz_scene_graph
import data_utils
from IPython import embed
from utils.timer import Timer
def get_minibatch(roidb, num_classes):
"""Given a mini batch of roidb, construct a data blob from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
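    # Added note (illustrative numbers, not from the original config): with
    # BATCH_SIZE = 256, num_images = 2 and FG_FRACTION = 0.25, this gives
    # rois_per_image = 128 and fg_rois_per_image = 32.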
im_timer = Timer()
im_timer.tic()
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
im_timer.toc()
blobs = {'ims': im_blob}
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
rels_blob = np.zeros((0, 3), dtype=np.int32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
box_idx_offset = 0
d_timer = Timer()
d_timer.tic()
for im_i in xrange(num_images):
# sample graph
roi_inds, rels = _sample_graph(roidb[im_i],
fg_rois_per_image,
rois_per_image,
num_neg_rels=cfg.TRAIN.NUM_NEG_RELS)
if rels.size == 0:
print('batch skipped')
return None
# gather all samples based on the sampled graph
rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights =\
_gather_samples(roidb[im_i], roi_inds, rels, num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1)) #im id for roi_pooling
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
all_overlaps = np.hstack((all_overlaps, overlaps))
# offset the relationship reference idx the number of previously
# added box
rels_offset = rels.copy()
rels_offset[:, :2] += box_idx_offset
rels_blob = np.vstack([rels_blob, rels_offset])
box_idx_offset += rois.shape[0]
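        # Added note (hypothetical numbers): if image 0 contributed 64 boxes, a
        # relation [3, 7, pred] sampled in image 1 is stored as [67, 71, pred],
        # so its indices keep pointing at rows of the concatenated rois_blob.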
#viz_inds = np.where(overlaps == 1)[0] # ground truth
#viz_inds = npr.choice(np.arange(rois.shape[0]), size=50, replace=False) # random sample
#viz_inds = np.where(overlaps > cfg.TRAIN.FG_THRESH)[0] # foreground
#viz_scene_graph(im_blob[im_i], rois, labels, viz_inds, rels)
blobs['rois'] = rois_blob.copy()
blobs['labels'] = labels_blob.copy().astype(np.int32)
blobs['relations'] = rels_blob[:,:2].copy().astype(np.int32)
blobs['predicates'] = rels_blob[:,2].copy().astype(np.int32)
blobs['bbox_targets'] = bbox_targets_blob.copy()
blobs['bbox_inside_weights'] = bbox_inside_blob.copy()
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32).copy()
num_roi = rois_blob.shape[0]
num_rel = rels_blob.shape[0]
blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel,
rois_blob,
rels_blob)
d_timer.toc()
graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2])
for k in graph_dict:
blobs[k] = graph_dict[k]
return blobs
def _gather_samples(roidb, roi_inds, rels, num_classes):
"""
join all samples and produce sampled items
"""
rois = roidb['boxes']
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
# decide bg rois
bg_inds = np.where(overlaps < cfg.TRAIN.FG_THRESH)[0]
labels = labels.copy()
labels[bg_inds] = 0
labels = labels[roi_inds]
# print('num bg = %i' % np.where(labels==0)[0].shape[0])
# rois and bbox targets
overlaps = overlaps[roi_inds]
rois = rois[roi_inds]
# convert rel index
roi_ind_map = {}
for i, roi_i in enumerate(roi_inds):
roi_ind_map[roi_i] = i
for i, rel in enumerate(rels):
rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][roi_inds, :], num_classes)
return rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128):
"""
Sample a graph from the foreground rois of an image
roidb: roidb of an image
rois_per_image: maximum number of rois per image
"""
gt_rels = roidb['gt_relations']
# index of assigned gt box for foreground boxes
fg_gt_ind_assignments = roidb['fg_gt_ind_assignments']
# find all fg proposals that are mapped to a gt
gt_to_fg_roi_inds = {}
all_fg_roi_inds = []
for ind, gt_ind in fg_gt_ind_assignments.items():
if gt_ind not in gt_to_fg_roi_inds:
gt_to_fg_roi_inds[gt_ind] = []
gt_to_fg_roi_inds[gt_ind].append(ind)
all_fg_roi_inds.append(ind)
# print('gt rois = %i' % np.where(roidb['max_overlaps']==1)[0].shape[0])
# print('assigned gt = %i' % len(gt_to_fg_roi_inds.keys()))
# dedup the roi inds
all_fg_roi_inds = np.array(list(set(all_fg_roi_inds)))
# find all valid relations in fg objects
pos_rels = []
for rel in gt_rels:
for sub_i in gt_to_fg_roi_inds[rel[0]]:
for obj_i in gt_to_fg_roi_inds[rel[1]]:
pos_rels.append([sub_i, obj_i, rel[2]])
# print('num fg rois = %i' % all_fg_roi_inds.shape[0])
rels = []
rels_inds = []
roi_inds = []
if len(pos_rels) > 0:
# de-duplicate the relations
_, indices = np.unique(["{} {}".format(i, j) for i,j,k in pos_rels], return_index=True)
pos_rels = np.array(pos_rels)[indices, :]
# print('num pos rels = %i' % pos_rels.shape[0])
# construct graph based on valid relations
for rel in pos_rels:
roi_inds += rel[:2].tolist()
roi_inds = list(set(roi_inds)) # keep roi inds unique
rels.append(rel)
rels_inds.append(rel[:2].tolist())
if len(roi_inds) >= num_fg_rois:
break
# print('sampled rels = %i' % len(rels))
roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds)
num_rois_to_sample = min(num_fg_rois - len(roi_inds), len(roi_candidates))
# if not enough rois, sample fg rois
if num_rois_to_sample > 0:
roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample,
replace=False)
roi_inds = np.hstack([roi_inds, roi_sample])
# sample background relations
sample_rels = []
sample_rels_inds = []
for i in roi_inds:
for j in roi_inds:
if i != j and [i, j] not in rels_inds:
sample_rels.append([i,j,0])
sample_rels_inds.append([i,j])
if len(sample_rels) > 0:
# randomly sample negative edges to prevent no edges
num_neg_rels = np.minimum(len(sample_rels), num_neg_rels)
inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False)
rels += [sample_rels[i] for i in inds]
rels_inds += [sample_rels_inds[i] for i in inds]
# if still not enough rois, sample bg rois
num_rois_to_sample = num_rois - len(roi_inds)
if num_rois_to_sample > 0:
bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample)
roi_inds = np.hstack([roi_inds, bg_roi_inds])
roi_inds = np.array(roi_inds).astype(np.int64)
# print('sampled rois = %i' % roi_inds.shape[0])
return roi_inds.astype(np.int64), np.array(rels).astype(np.int64)
def _sample_bg_rois(roidb, num_bg_rois):
"""
Sample rois from background
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where(((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO)) |
(labels == 0))[0]
bg_rois_per_this_image = np.minimum(num_bg_rois, bg_inds.size)
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return bg_inds
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = roidb[i]['image']() # use image getter
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
# MASKED: _get_bbox_regression_labels function (lines 279-301)
|
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind].astype(np.int64)
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
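# --- Added illustration, not part of the original repository code ----------
# A self-contained numpy sketch of the same 4-of-4*K expansion, with a made-up
# inside-weight vector standing in for cfg.TRAIN.BBOX_INSIDE_WEIGHTS:
def _expand_bbox_targets_demo():
    num_classes = 4
    inside_weights = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)  # stand-in value
    # one RoI assigned to class 2 with targets (dx, dy, dw, dh), one background RoI
    bbox_target_data = np.array([[2, 0.1, -0.2, 0.05, 0.3],
                                 [0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float32)
    targets = np.zeros((2, 4 * num_classes), dtype=np.float32)
    weights = np.zeros_like(targets)
    for ind in np.where(bbox_target_data[:, 0] > 0)[0]:
        cls = int(bbox_target_data[ind, 0])
        targets[ind, 4 * cls:4 * cls + 4] = bbox_target_data[ind, 1:]
        weights[ind, 4 * cls:4 * cls + 4] = inside_weights
    # targets has shape (2, 16); only columns 8:12 (class 2) of row 0 are non-zero
    return targets, weights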
| 279 | 301 |
# --------------------------------------------------------
# Adapted from Faster R-CNN (https://github.com/rbgirshick/py-faster-rcnn)
# Written by Danfei Xu
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
#from datasets.viz import viz_scene_graph
import data_utils
from IPython import embed
from utils.timer import Timer
def get_minibatch(roidb, num_classes):
"""Given a mini batch of roidb, construct a data blob from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
im_timer = Timer()
im_timer.tic()
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
im_timer.toc()
blobs = {'ims': im_blob}
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
rels_blob = np.zeros((0, 3), dtype=np.int32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
box_idx_offset = 0
d_timer = Timer()
d_timer.tic()
for im_i in xrange(num_images):
# sample graph
roi_inds, rels = _sample_graph(roidb[im_i],
fg_rois_per_image,
rois_per_image,
num_neg_rels=cfg.TRAIN.NUM_NEG_RELS)
if rels.size == 0:
print('batch skipped')
return None
# gather all samples based on the sampled graph
rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights =\
_gather_samples(roidb[im_i], roi_inds, rels, num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1)) #im id for roi_pooling
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
all_overlaps = np.hstack((all_overlaps, overlaps))
# offset the relationship reference idx the number of previously
# added box
rels_offset = rels.copy()
rels_offset[:, :2] += box_idx_offset
rels_blob = np.vstack([rels_blob, rels_offset])
box_idx_offset += rois.shape[0]
#viz_inds = np.where(overlaps == 1)[0] # ground truth
#viz_inds = npr.choice(np.arange(rois.shape[0]), size=50, replace=False) # random sample
#viz_inds = np.where(overlaps > cfg.TRAIN.FG_THRESH)[0] # foreground
#viz_scene_graph(im_blob[im_i], rois, labels, viz_inds, rels)
blobs['rois'] = rois_blob.copy()
blobs['labels'] = labels_blob.copy().astype(np.int32)
blobs['relations'] = rels_blob[:,:2].copy().astype(np.int32)
blobs['predicates'] = rels_blob[:,2].copy().astype(np.int32)
blobs['bbox_targets'] = bbox_targets_blob.copy()
blobs['bbox_inside_weights'] = bbox_inside_blob.copy()
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32).copy()
num_roi = rois_blob.shape[0]
num_rel = rels_blob.shape[0]
blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel,
rois_blob,
rels_blob)
d_timer.toc()
graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2])
for k in graph_dict:
blobs[k] = graph_dict[k]
return blobs
def _gather_samples(roidb, roi_inds, rels, num_classes):
"""
    Join all samples and produce the sampled items for the given roi indices
"""
rois = roidb['boxes']
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
# decide bg rois
bg_inds = np.where(overlaps < cfg.TRAIN.FG_THRESH)[0]
labels = labels.copy()
labels[bg_inds] = 0
labels = labels[roi_inds]
# print('num bg = %i' % np.where(labels==0)[0].shape[0])
# rois and bbox targets
overlaps = overlaps[roi_inds]
rois = rois[roi_inds]
# convert rel index
roi_ind_map = {}
for i, roi_i in enumerate(roi_inds):
roi_ind_map[roi_i] = i
for i, rel in enumerate(rels):
rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][roi_inds, :], num_classes)
return rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128):
"""
Sample a graph from the foreground rois of an image
    roidb: roidb entry of an image
    num_fg_rois: target number of foreground rois
    num_rois: maximum number of rois per image
    num_neg_rels: number of negative (background) relations to sample
"""
gt_rels = roidb['gt_relations']
# index of assigned gt box for foreground boxes
fg_gt_ind_assignments = roidb['fg_gt_ind_assignments']
# find all fg proposals that are mapped to a gt
gt_to_fg_roi_inds = {}
all_fg_roi_inds = []
for ind, gt_ind in fg_gt_ind_assignments.items():
if gt_ind not in gt_to_fg_roi_inds:
gt_to_fg_roi_inds[gt_ind] = []
gt_to_fg_roi_inds[gt_ind].append(ind)
all_fg_roi_inds.append(ind)
# print('gt rois = %i' % np.where(roidb['max_overlaps']==1)[0].shape[0])
# print('assigned gt = %i' % len(gt_to_fg_roi_inds.keys()))
# dedup the roi inds
all_fg_roi_inds = np.array(list(set(all_fg_roi_inds)))
# find all valid relations in fg objects
pos_rels = []
for rel in gt_rels:
for sub_i in gt_to_fg_roi_inds[rel[0]]:
for obj_i in gt_to_fg_roi_inds[rel[1]]:
pos_rels.append([sub_i, obj_i, rel[2]])
# print('num fg rois = %i' % all_fg_roi_inds.shape[0])
rels = []
rels_inds = []
roi_inds = []
if len(pos_rels) > 0:
# de-duplicate the relations
_, indices = np.unique(["{} {}".format(i, j) for i,j,k in pos_rels], return_index=True)
pos_rels = np.array(pos_rels)[indices, :]
# print('num pos rels = %i' % pos_rels.shape[0])
# construct graph based on valid relations
for rel in pos_rels:
roi_inds += rel[:2].tolist()
roi_inds = list(set(roi_inds)) # keep roi inds unique
rels.append(rel)
rels_inds.append(rel[:2].tolist())
if len(roi_inds) >= num_fg_rois:
break
# print('sampled rels = %i' % len(rels))
roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds)
num_rois_to_sample = min(num_fg_rois - len(roi_inds), len(roi_candidates))
# if not enough rois, sample fg rois
if num_rois_to_sample > 0:
roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample,
replace=False)
roi_inds = np.hstack([roi_inds, roi_sample])
# sample background relations
sample_rels = []
sample_rels_inds = []
for i in roi_inds:
for j in roi_inds:
if i != j and [i, j] not in rels_inds:
sample_rels.append([i,j,0])
sample_rels_inds.append([i,j])
if len(sample_rels) > 0:
# randomly sample negative edges to prevent no edges
num_neg_rels = np.minimum(len(sample_rels), num_neg_rels)
inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False)
rels += [sample_rels[i] for i in inds]
rels_inds += [sample_rels_inds[i] for i in inds]
# if still not enough rois, sample bg rois
num_rois_to_sample = num_rois - len(roi_inds)
if num_rois_to_sample > 0:
bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample)
roi_inds = np.hstack([roi_inds, bg_roi_inds])
roi_inds = np.array(roi_inds).astype(np.int64)
# print('sampled rois = %i' % roi_inds.shape[0])
return roi_inds.astype(np.int64), np.array(rels).astype(np.int64)
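# Illustrative usage sketch (not part of the original file): _sample_graph only
# touches a handful of roidb fields. The entry below is invented purely for
# illustration (values and shapes are arbitrary) and assumes the fast_rcnn cfg
# defaults for the background-overlap thresholds.
import numpy as np
example_entry = {
    'boxes': np.zeros((4, 4), dtype=np.float32),         # proposal boxes (x1, y1, x2, y2)
    'max_classes': np.array([3, 7, 0, 3]),               # class of the best-overlapping gt box
    'max_overlaps': np.array([1.0, 0.8, 0.2, 0.6]),      # IoU with that gt box
    'gt_relations': np.array([[0, 1, 5]]),                # (subject gt idx, object gt idx, predicate)
    'fg_gt_ind_assignments': {0: 0, 1: 1, 3: 0},          # fg proposal idx -> assigned gt idx
    'bbox_targets': np.zeros((4, 5), dtype=np.float32),   # class + 4 regression targets per roi
}
example_roi_inds, example_rels = _sample_graph(example_entry,
                                               num_fg_rois=16, num_rois=64)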
def _sample_bg_rois(roidb, num_bg_rois):
"""
Sample rois from background
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
bg_inds = np.where(((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO)) |
(labels == 0))[0]
bg_rois_per_this_image = np.minimum(num_bg_rois, bg_inds.size)
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return bg_inds
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = roidb[i]['image']() # use image getter
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind].astype(np.int64)
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
|
_chain_future
|
Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
|
import concurrent.futures
import threading
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import attr
import uuid
import asyncio
from asyncio import ensure_future
from typing import Any, Union, Coroutine, Callable, Generator, TypeVar, \
Awaitable
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
# pylint: disable=invalid-name
T = TypeVar('T')
CALLABLE_T = TypeVar('CALLABLE_T', bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
_LOGGER = logger_code
try:
# pylint: disable=invalid-name
asyncio_run = asyncio.run # type: ignore
except AttributeError:
_LOGGER.info("env <python 3.7")
_T = TypeVar('_T')
def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
"""Minimal re-implementation of asyncio.run (since 3.7)."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
return loop.run_until_complete(main)
finally:
print('loop final')
asyncio.set_event_loop(None)
loop.close()
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, '_edge_callback', True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, '_edge_callback', False) is True
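# Illustrative usage sketch (not part of the original module): marking a
# function as safe to run inside the event loop with the decorator above.
@callback
def _demo_handler() -> None:
    """Runs directly in the event loop, so it must not block."""

assert is_callback(_demo_handler)      # True: the '_edge_callback' marker is set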
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id = attr.ib(
type=str,
default=None,
)
id = attr.ib(
type=str,
default=attr.Factory(lambda: uuid.uuid4().hex),
)
def as_dict(self) -> dict:
"""Return a dictionary representation of the context."""
return {
'id': self.id,
'user_id': self.user_id,
}
def _set_result_unless_cancelled(fut: Future, result: Any) -> None:
"""Set the result only if the Future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(
concurr: concurrent.futures.Future,
source: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source: Union[concurrent.futures.Future, Future],
dest: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
# MASKED: _chain_future function (lines 125-171)
def run_coroutine_threadsafe(
coro: Union[Coroutine, Generator],
loop: AbstractEventLoop) -> concurrent.futures.Future:
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def callback() -> None:
"""Handle the call to the coroutine."""
try:
_chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(callback)
return future
def fire_coroutine_threadsafe(coro: Coroutine,
loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
    overhead involved in firing the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required: %s' % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(loop: AbstractEventLoop, callback: Callable,
*args: Any) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
NOTE: This code references home-assistant.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
|
def _chain_future(
source: Union[concurrent.futures.Future, Future],
destination: Union[concurrent.futures.Future, Future]) -> None:
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isinstance(source, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for source argument')
if not isinstance(destination, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for destination argument')
# pylint: disable=protected-access
if isinstance(source, Future):
source_loop = source._loop # type: ignore
else:
source_loop = None
if isinstance(destination, Future):
dest_loop = destination._loop # type: ignore
else:
dest_loop = None
def _set_state(future: Union[concurrent.futures.Future, Future],
other: Union[concurrent.futures.Future, Future]) -> None:
if isinstance(future, Future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(
destination: Union[concurrent.futures.Future, Future]) -> None:
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(
source: Union[concurrent.futures.Future, Future]) -> None:
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
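# Illustrative usage sketch (not part of the original module): chain an
# asyncio.Future into a concurrent.futures.Future and watch the result
# propagate. _chain_future is module-private, so this assumes it has been
# imported from the module above; it also assumes Python 3.7+.
import asyncio
import concurrent.futures

async def _demo_chain():
    src = asyncio.get_running_loop().create_future()   # asyncio.Future
    dst = concurrent.futures.Future()                   # concurrent.futures.Future
    _chain_future(src, dst)                             # dst will mirror src's outcome
    src.set_result(42)
    await asyncio.sleep(0)                              # let the done-callback run
    return dst.result(timeout=1)                        # -> 42

print(asyncio.run(_demo_chain()))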
| 125 | 171 |
import concurrent.futures
import threading
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import attr
import uuid
import asyncio
from asyncio import ensure_future
from typing import Any, Union, Coroutine, Callable, Generator, TypeVar, \
Awaitable
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
# pylint: disable=invalid-name
T = TypeVar('T')
CALLABLE_T = TypeVar('CALLABLE_T', bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
_LOGGER = logger_code
try:
# pylint: disable=invalid-name
asyncio_run = asyncio.run # type: ignore
except AttributeError:
_LOGGER.info("env <python 3.7")
_T = TypeVar('_T')
def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
"""Minimal re-implementation of asyncio.run (since 3.7)."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
return loop.run_until_complete(main)
finally:
print('loop final')
asyncio.set_event_loop(None)
loop.close()
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, '_edge_callback', True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, '_edge_callback', False) is True
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id = attr.ib(
type=str,
default=None,
)
id = attr.ib(
type=str,
default=attr.Factory(lambda: uuid.uuid4().hex),
)
def as_dict(self) -> dict:
"""Return a dictionary representation of the context."""
return {
'id': self.id,
'user_id': self.user_id,
}
def _set_result_unless_cancelled(fut: Future, result: Any) -> None:
"""Set the result only if the Future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(
concurr: concurrent.futures.Future,
source: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source: Union[concurrent.futures.Future, Future],
dest: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(
source: Union[concurrent.futures.Future, Future],
destination: Union[concurrent.futures.Future, Future]) -> None:
"""Chain two futures so that when one completes, so does the other.
The result (or exception) of source will be copied to destination.
If destination is cancelled, source gets cancelled too.
Compatible with both asyncio.Future and concurrent.futures.Future.
"""
if not isinstance(source, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for source argument')
if not isinstance(destination, (Future, concurrent.futures.Future)):
raise TypeError('A future is required for destination argument')
# pylint: disable=protected-access
if isinstance(source, Future):
source_loop = source._loop # type: ignore
else:
source_loop = None
if isinstance(destination, Future):
dest_loop = destination._loop # type: ignore
else:
dest_loop = None
def _set_state(future: Union[concurrent.futures.Future, Future],
other: Union[concurrent.futures.Future, Future]) -> None:
if isinstance(future, Future):
_copy_future_state(other, future)
else:
_set_concurrent_future_state(future, other)
def _call_check_cancel(
destination: Union[concurrent.futures.Future, Future]) -> None:
if destination.cancelled():
if source_loop is None or source_loop is dest_loop:
source.cancel()
else:
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(
source: Union[concurrent.futures.Future, Future]) -> None:
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
dest_loop.call_soon_threadsafe(_set_state, destination, source)
destination.add_done_callback(_call_check_cancel)
source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(
coro: Union[Coroutine, Generator],
loop: AbstractEventLoop) -> concurrent.futures.Future:
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def callback() -> None:
"""Handle the call to the coroutine."""
try:
_chain_future(ensure_future(coro, loop=loop), future)
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(callback)
return future
def fire_coroutine_threadsafe(coro: Coroutine,
loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
    overhead involved in firing the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
if not coroutines.iscoroutine(coro):
raise TypeError('A coroutine object is required: %s' % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(loop: AbstractEventLoop, callback: Callable,
*args: Any) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
NOTE: This code references home-assistant.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError('Cannot be called from within the event loop')
future = concurrent.futures.Future() # type: concurrent.futures.Future
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
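# Illustrative usage sketch (not part of the original module): run the event
# loop in a background thread and submit a coroutine to it from the calling
# thread. Assumes run_coroutine_threadsafe is imported from the module above.
import asyncio
import threading

_demo_loop = asyncio.new_event_loop()
threading.Thread(target=_demo_loop.run_forever, daemon=True).start()

async def _demo_add(a, b):
    await asyncio.sleep(0)
    return a + b

_demo_future = run_coroutine_threadsafe(_demo_add(1, 2), _demo_loop)
print(_demo_future.result(timeout=5))          # -> 3 (a concurrent.futures.Future result)
_demo_loop.call_soon_threadsafe(_demo_loop.stop)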
|
_get_through_model
|
Get the "through" model associated with this field.
We need to handle things differently for Django 1.1 vs Django 1.2:
in 1.1 'through' is a string and 'through_model' holds the class,
while in 1.2 'through' is the class itself.
|
from django.db import models
from django.db.models.query import QuerySet, Q
from django.db.models.base import ModelBase
from django.db.models.fields.related import RelatedField
from django.conf import settings
from utils import NestedSet
from signals import pre_publish, post_publish
# this takes some inspiration from the publisher stuff in
# django-cms 2.0
# e.g. http://github.com/digi604/django-cms-2.0/blob/master/publisher/models.py
#
# but we want this to be a reusable/standalone app and have a few different needs
#
class PublishException(Exception):
pass
class UnpublishException(Exception):
pass
class PublishableQuerySet(QuerySet):
def changed(self):
'''all draft objects that have not been published yet'''
return self.filter(Publishable.Q_CHANGED)
def deleted(self):
'''public objects that need deleting'''
return self.filter(Publishable.Q_DELETED)
def draft(self):
'''all draft objects'''
return self.filter(Publishable.Q_DRAFT)
def draft_and_deleted(self):
return self.filter(Publishable.Q_DRAFT | Publishable.Q_DELETED)
def published(self):
'''all public/published objects'''
return self.filter(Publishable.Q_PUBLISHED)
def publish(self, all_published=None):
'''publish all models in this queryset'''
if all_published is None:
all_published = NestedSet()
for p in self:
p.publish(all_published=all_published)
def delete(self, mark_for_deletion=True):
'''
override delete so that we call delete on each object separately, as delete needs
to set some flags etc
'''
for p in self:
p.delete(mark_for_deletion=mark_for_deletion)
class PublishableManager(models.Manager):
def get_query_set(self):
return PublishableQuerySet(self.model)
def changed(self):
'''all draft objects that have not been published yet'''
return self.get_query_set().changed()
def deleted(self):
'''public objects that need deleting'''
return self.get_query_set().deleted()
def draft(self):
'''all draft objects'''
return self.get_query_set().draft()
def draft_and_deleted(self):
return self.get_query_set().draft_and_deleted()
def published(self):
'''all public/published objects'''
return self.get_query_set().published()
class PublishableBase(ModelBase):
def __new__(cls, name, bases, attrs):
new_class = super(PublishableBase, cls).__new__(cls, name, bases, attrs)
# insert an extra permission in for "Can publish"
# as well as a "method" to find name of publish_permission for this object
opts = new_class._meta
name = u'Can publish %s' % opts.verbose_name
code = u'publish_%s' % opts.object_name.lower()
opts.permissions = tuple(opts.permissions) + ((code, name), )
opts.get_publish_permission = lambda: code
return new_class
class Publishable(models.Model):
__metaclass__ = PublishableBase
PUBLISH_DEFAULT = 0
PUBLISH_CHANGED = 1
PUBLISH_DELETE = 2
PUBLISH_CHOICES = ((PUBLISH_DEFAULT, 'Published'), (PUBLISH_CHANGED, 'Changed'), (PUBLISH_DELETE, 'To be deleted'))
# make these available here so can easily re-use them in other code
Q_PUBLISHED = Q(is_public=True)
Q_DRAFT = Q(is_public=False) & ~Q(publish_state=PUBLISH_DELETE)
Q_CHANGED = Q(is_public=False, publish_state=PUBLISH_CHANGED)
Q_DELETED = Q(is_public=False, publish_state=PUBLISH_DELETE)
is_public = models.BooleanField(default=False, editable=False, db_index=True)
publish_state = models.IntegerField('Publication status', editable=False, db_index=True, choices=PUBLISH_CHOICES, default=PUBLISH_DEFAULT)
public = models.OneToOneField('self', related_name='draft', null=True,
editable=False, on_delete=models.SET_NULL)
class Meta:
abstract = True
class PublishMeta(object):
publish_exclude_fields = ['id', 'is_public', 'publish_state', 'public', 'draft']
publish_reverse_fields = []
publish_functions = {}
@classmethod
def _combined_fields(cls, field_name):
fields = []
for clazz in cls.__mro__:
fields.extend(getattr(clazz, field_name, []))
return fields
@classmethod
def excluded_fields(cls):
return cls._combined_fields('publish_exclude_fields')
@classmethod
def reverse_fields_to_publish(cls):
return cls._combined_fields('publish_reverse_fields')
@classmethod
def find_publish_function(cls, field_name, default_function):
'''
Search to see if there is a function to copy the given field over.
Function should take same params as setattr()
'''
for clazz in cls.__mro__:
publish_functions = getattr(clazz, 'publish_functions', {})
fn = publish_functions.get(field_name, None)
if fn:
return fn
return default_function
objects = PublishableManager()
def is_marked_for_deletion(self):
return self.publish_state == Publishable.PUBLISH_DELETE
def get_public_absolute_url(self):
if self.public:
get_absolute_url = getattr(self.public, 'get_absolute_url', None)
if get_absolute_url:
return get_absolute_url()
return None
def save(self, mark_changed=True, *arg, **kw):
if not self.is_public and mark_changed:
if self.publish_state == Publishable.PUBLISH_DELETE:
raise PublishException("Attempting to save model marked for deletion")
self.publish_state = Publishable.PUBLISH_CHANGED
super(Publishable, self).save(*arg, **kw)
def delete(self, mark_for_deletion=True):
if self.public and mark_for_deletion:
self.publish_state = Publishable.PUBLISH_DELETE
self.save(mark_changed=False)
else:
super(Publishable, self).delete()
def undelete(self):
self.publish_state = Publishable.PUBLISH_CHANGED
self.save(mark_changed=False)
def _pre_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
sender = self.__class__
pre_publish.send(sender=sender, instance=self, deleted=deleted)
def _post_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
# we need to make sure we get the instance that actually
# got published (in case it was indirectly published elsewhere)
sender = self.__class__
instance = all_published.original(self)
post_publish.send(sender=sender, instance=instance, deleted=deleted)
def publish(self, dry_run=False, all_published=None, parent=None):
'''
        Publish either changes or deletions, depending on whether this
        draft model has been marked for deletion.
        Models marked for deletion will have their public counterpart
        deleted; otherwise changes are copied onto the public counterpart.
'''
if self.is_public:
raise PublishException("Cannot publish public model - publish should be called from draft model")
if self.pk is None:
raise PublishException("Please save model before publishing")
if self.publish_state == Publishable.PUBLISH_DELETE:
self.publish_deletions(dry_run=dry_run, all_published=all_published, parent=parent)
return None
else:
return self.publish_changes(dry_run=dry_run, all_published=all_published, parent=parent)
def unpublish(self, dry_run=False):
'''
unpublish models by deleting public model
'''
if self.is_public:
raise UnpublishException("Cannot unpublish a public model - unpublish should be called from draft model")
if self.pk is None:
raise UnpublishException("Please save the model before unpublishing")
public_model = self.public
if public_model and not dry_run:
self.public = None
self.save()
public_model.delete(mark_for_deletion=False)
return public_model
def _get_public_or_publish(self, *arg, **kw):
# only publish if we don't yet have an id for the
# public model
if self.public:
return self.public
return self.publish(*arg, **kw)
# MASKED: _get_through_model function (lines 244-256)
def _changes_need_publishing(self):
return self.publish_state == Publishable.PUBLISH_CHANGED or not self.public
def publish_changes(self, dry_run=False, all_published=None, parent=None):
'''
        Publish changes to the model - essentially copy all of its content to another copy in
        the database.
        If you set dry_run=True nothing will be written to the database. Combined with
        the all_published value, this can be used to find out which other models
        would be affected by this function.
'''
assert not self.is_public, "Cannot publish public model - publish should be called from draft model"
assert self.pk is not None, "Please save model before publishing"
# avoid mutual recursion
if all_published is None:
all_published = NestedSet()
if self in all_published:
return all_published.original(self).public
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published)
public_version = self.public
if not public_version:
public_version = self.__class__(is_public=True)
excluded_fields = self.PublishMeta.excluded_fields()
reverse_fields_to_publish = self.PublishMeta.reverse_fields_to_publish()
if self._changes_need_publishing():
# copy over regular fields
for field in self._meta.fields:
if field.name in excluded_fields:
continue
value = getattr(self, field.name)
if isinstance(field, RelatedField):
related = field.rel.to
if issubclass(related, Publishable):
if value is not None:
value = value._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self)
if not dry_run:
publish_function = self.PublishMeta.find_publish_function(field.name, setattr)
publish_function(public_version, field.name, value)
# save the public version and update
# state so we know everything is up-to-date
if not dry_run:
public_version.save()
self.public = public_version
self.publish_state = Publishable.PUBLISH_DEFAULT
self.save(mark_changed=False)
# copy over many-to-many fields
for field in self._meta.many_to_many:
name = field.name
if name in excluded_fields:
continue
m2m_manager = getattr(self, name)
public_objs = list(m2m_manager.all())
field_object, model, direct, m2m = self._meta.get_field_by_name(name)
through_model = self._get_through_model(field_object)
if through_model:
# see if we can work out which reverse relationship this is
# see if we are using our own "through" table or not
if issubclass(through_model, Publishable):
# this will be db name (e.g. with _id on end)
m2m_reverse_name = field_object.m2m_reverse_name()
for reverse_field in through_model._meta.fields:
if reverse_field.column == m2m_reverse_name:
related_name = reverse_field.name
related_field = getattr(through_model, related_name).field
reverse_name = related_field.related.get_accessor_name()
reverse_fields_to_publish.append(reverse_name)
break
continue # m2m via through table won't be dealt with here
related = field_object.rel.to
if issubclass(related, Publishable):
public_objs = [p._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self) for p in public_objs]
if not dry_run:
public_m2m_manager = getattr(public_version, name)
old_objs = public_m2m_manager.exclude(pk__in=[p.pk for p in public_objs])
public_m2m_manager.remove(*old_objs)
public_m2m_manager.add(*public_objs)
# one-to-many and one-to-one reverse relations
for obj in self._meta.get_all_related_objects():
if issubclass(obj.model, Publishable):
name = obj.get_accessor_name()
if name in excluded_fields:
continue
if name not in reverse_fields_to_publish:
continue
if obj.field.rel.multiple:
related_items = getattr(self, name).all()
else:
try:
related_items = [getattr(self, name)]
except obj.model.DoesNotExist:
related_items = []
for related_item in related_items:
related_item.publish(dry_run=dry_run, all_published=all_published, parent=self)
# make sure we tidy up anything that needs deleting
if self.public and not dry_run:
if obj.field.rel.multiple:
public_ids = [r.public_id for r in related_items]
deleted_items = getattr(self.public, name).exclude(pk__in=public_ids)
deleted_items.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published)
return public_version
def publish_deletions(self, all_published=None, parent=None, dry_run=False):
'''
actually delete models that have been marked for deletion
'''
if self.publish_state != Publishable.PUBLISH_DELETE:
return
if all_published is None:
all_published = NestedSet()
if self in all_published:
return
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published, deleted=True)
for related in self._meta.get_all_related_objects():
if not issubclass(related.model, Publishable):
continue
name = related.get_accessor_name()
if name in self.PublishMeta.excluded_fields():
continue
try:
instances = getattr(self, name).all()
except AttributeError:
instances = [getattr(self, name)]
for instance in instances:
instance.publish_deletions(all_published=all_published, parent=self, dry_run=dry_run)
if not dry_run:
public = self.public
self.delete(mark_for_deletion=False)
if public:
public.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published, deleted=True)
if getattr(settings, 'TESTING_PUBLISH', False):
# classes to test that publishing etc work ok
from datetime import datetime
class Site(models.Model):
title = models.CharField(max_length=100)
domain = models.CharField(max_length=100)
class FlatPage(Publishable):
url = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
enable_comments = models.BooleanField()
template_name = models.CharField(max_length=70, blank=True)
registration_required = models.BooleanField()
sites = models.ManyToManyField(Site)
class Meta:
ordering = ['url']
def get_absolute_url(self):
if self.is_public:
return self.url
return '%s*' % self.url
class Author(Publishable):
name = models.CharField(max_length=100)
profile = models.TextField(blank=True)
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = ['authorprofile']
class AuthorProfile(Publishable):
author = models.OneToOneField(Author)
extra_profile = models.TextField(blank=True)
class ChangeLog(models.Model):
changed = models.DateTimeField(db_index=True, auto_now_add=True)
message = models.CharField(max_length=200)
class Tag(models.Model):
title = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
# publishable model with a reverse relation to
# page (as a child)
class PageBlock(Publishable):
page=models.ForeignKey('Page')
content = models.TextField(blank=True)
# non-publishable reverse relation to page (as a child)
class Comment(models.Model):
page=models.ForeignKey('Page')
comment = models.TextField()
def update_pub_date(page, field_name, value):
# ignore value entirely and replace with now
setattr(page, field_name, update_pub_date.pub_date)
update_pub_date.pub_date = datetime.now()
class Page(Publishable):
slug = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
pub_date = models.DateTimeField(default=datetime.now)
parent = models.ForeignKey('self', blank=True, null=True)
authors = models.ManyToManyField(Author, blank=True)
log = models.ManyToManyField(ChangeLog, blank=True)
tags = models.ManyToManyField(Tag, through='PageTagOrder', blank=True)
class Meta:
ordering = ['slug']
class PublishMeta(Publishable.PublishMeta):
publish_exclude_fields = ['log']
publish_reverse_fields = ['pageblock_set']
publish_functions = { 'pub_date': update_pub_date }
def get_absolute_url(self):
if not self.parent:
return u'/%s/' % self.slug
return '%s%s/' % (self.parent.get_absolute_url(), self.slug)
class PageTagOrder(Publishable):
# note these are named in non-standard way to
# ensure we are getting correct names
tagged_page=models.ForeignKey(Page)
page_tag=models.ForeignKey(Tag)
tag_order=models.IntegerField()
|
def _get_through_model(self, field_object):
'''
Get the "through" model associated with this field.
        We need to handle things differently for Django 1.1 vs Django 1.2:
        in 1.1 'through' is a string and 'through_model' holds the class,
        while in 1.2 'through' is the class itself.
'''
through = field_object.rel.through
if through:
if isinstance(through, basestring):
return field_object.rel.through_model
return through
return None
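# Illustrative usage sketch (not part of the original module): with the
# TESTING_PUBLISH models defined in this file (Page.tags declares
# through='PageTagOrder', Page.authors does not) and the Django 1.1/1.2-era
# _meta API the surrounding code relies on, the helper behaves as follows.
def _demo_get_through_model():
    page = Page()
    field_object, model, direct, m2m = Page._meta.get_field_by_name('tags')
    print(page._get_through_model(field_object))     # -> PageTagOrder
    field_object, model, direct, m2m = Page._meta.get_field_by_name('authors')
    print(page._get_through_model(field_object))     # -> None (no explicit through table)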
| 244 | 256 |
from django.db import models
from django.db.models.query import QuerySet, Q
from django.db.models.base import ModelBase
from django.db.models.fields.related import RelatedField
from django.conf import settings
from utils import NestedSet
from signals import pre_publish, post_publish
# this takes some inspiration from the publisher stuff in
# django-cms 2.0
# e.g. http://github.com/digi604/django-cms-2.0/blob/master/publisher/models.py
#
# but we want this to be a reusable/standalone app and have a few different needs
#
class PublishException(Exception):
pass
class UnpublishException(Exception):
pass
class PublishableQuerySet(QuerySet):
def changed(self):
'''all draft objects that have not been published yet'''
return self.filter(Publishable.Q_CHANGED)
def deleted(self):
'''public objects that need deleting'''
return self.filter(Publishable.Q_DELETED)
def draft(self):
'''all draft objects'''
return self.filter(Publishable.Q_DRAFT)
def draft_and_deleted(self):
return self.filter(Publishable.Q_DRAFT | Publishable.Q_DELETED)
def published(self):
'''all public/published objects'''
return self.filter(Publishable.Q_PUBLISHED)
def publish(self, all_published=None):
'''publish all models in this queryset'''
if all_published is None:
all_published = NestedSet()
for p in self:
p.publish(all_published=all_published)
def delete(self, mark_for_deletion=True):
'''
override delete so that we call delete on each object separately, as delete needs
to set some flags etc
'''
for p in self:
p.delete(mark_for_deletion=mark_for_deletion)
class PublishableManager(models.Manager):
def get_query_set(self):
return PublishableQuerySet(self.model)
def changed(self):
'''all draft objects that have not been published yet'''
return self.get_query_set().changed()
def deleted(self):
'''public objects that need deleting'''
return self.get_query_set().deleted()
def draft(self):
'''all draft objects'''
return self.get_query_set().draft()
def draft_and_deleted(self):
return self.get_query_set().draft_and_deleted()
def published(self):
'''all public/published objects'''
return self.get_query_set().published()
class PublishableBase(ModelBase):
def __new__(cls, name, bases, attrs):
new_class = super(PublishableBase, cls).__new__(cls, name, bases, attrs)
# insert an extra permission in for "Can publish"
# as well as a "method" to find name of publish_permission for this object
opts = new_class._meta
name = u'Can publish %s' % opts.verbose_name
code = u'publish_%s' % opts.object_name.lower()
opts.permissions = tuple(opts.permissions) + ((code, name), )
opts.get_publish_permission = lambda: code
return new_class
class Publishable(models.Model):
__metaclass__ = PublishableBase
PUBLISH_DEFAULT = 0
PUBLISH_CHANGED = 1
PUBLISH_DELETE = 2
PUBLISH_CHOICES = ((PUBLISH_DEFAULT, 'Published'), (PUBLISH_CHANGED, 'Changed'), (PUBLISH_DELETE, 'To be deleted'))
# make these available here so can easily re-use them in other code
Q_PUBLISHED = Q(is_public=True)
Q_DRAFT = Q(is_public=False) & ~Q(publish_state=PUBLISH_DELETE)
Q_CHANGED = Q(is_public=False, publish_state=PUBLISH_CHANGED)
Q_DELETED = Q(is_public=False, publish_state=PUBLISH_DELETE)
is_public = models.BooleanField(default=False, editable=False, db_index=True)
publish_state = models.IntegerField('Publication status', editable=False, db_index=True, choices=PUBLISH_CHOICES, default=PUBLISH_DEFAULT)
public = models.OneToOneField('self', related_name='draft', null=True,
editable=False, on_delete=models.SET_NULL)
class Meta:
abstract = True
class PublishMeta(object):
publish_exclude_fields = ['id', 'is_public', 'publish_state', 'public', 'draft']
publish_reverse_fields = []
publish_functions = {}
@classmethod
def _combined_fields(cls, field_name):
fields = []
for clazz in cls.__mro__:
fields.extend(getattr(clazz, field_name, []))
return fields
@classmethod
def excluded_fields(cls):
return cls._combined_fields('publish_exclude_fields')
@classmethod
def reverse_fields_to_publish(cls):
return cls._combined_fields('publish_reverse_fields')
@classmethod
def find_publish_function(cls, field_name, default_function):
'''
Search to see if there is a function to copy the given field over.
Function should take same params as setattr()
'''
for clazz in cls.__mro__:
publish_functions = getattr(clazz, 'publish_functions', {})
fn = publish_functions.get(field_name, None)
if fn:
return fn
return default_function
objects = PublishableManager()
def is_marked_for_deletion(self):
return self.publish_state == Publishable.PUBLISH_DELETE
def get_public_absolute_url(self):
if self.public:
get_absolute_url = getattr(self.public, 'get_absolute_url', None)
if get_absolute_url:
return get_absolute_url()
return None
def save(self, mark_changed=True, *arg, **kw):
if not self.is_public and mark_changed:
if self.publish_state == Publishable.PUBLISH_DELETE:
raise PublishException("Attempting to save model marked for deletion")
self.publish_state = Publishable.PUBLISH_CHANGED
super(Publishable, self).save(*arg, **kw)
def delete(self, mark_for_deletion=True):
if self.public and mark_for_deletion:
self.publish_state = Publishable.PUBLISH_DELETE
self.save(mark_changed=False)
else:
super(Publishable, self).delete()
def undelete(self):
self.publish_state = Publishable.PUBLISH_CHANGED
self.save(mark_changed=False)
def _pre_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
sender = self.__class__
pre_publish.send(sender=sender, instance=self, deleted=deleted)
def _post_publish(self, dry_run, all_published, deleted=False):
if not dry_run:
# we need to make sure we get the instance that actually
# got published (in case it was indirectly published elsewhere)
sender = self.__class__
instance = all_published.original(self)
post_publish.send(sender=sender, instance=instance, deleted=deleted)
def publish(self, dry_run=False, all_published=None, parent=None):
'''
        Publish either changes or deletions, depending on whether this
        draft model has been marked for deletion.
        Models marked for deletion will have their public counterpart
        deleted; otherwise changes are copied onto the public counterpart.
'''
if self.is_public:
raise PublishException("Cannot publish public model - publish should be called from draft model")
if self.pk is None:
raise PublishException("Please save model before publishing")
if self.publish_state == Publishable.PUBLISH_DELETE:
self.publish_deletions(dry_run=dry_run, all_published=all_published, parent=parent)
return None
else:
return self.publish_changes(dry_run=dry_run, all_published=all_published, parent=parent)
def unpublish(self, dry_run=False):
'''
unpublish models by deleting public model
'''
if self.is_public:
raise UnpublishException("Cannot unpublish a public model - unpublish should be called from draft model")
if self.pk is None:
raise UnpublishException("Please save the model before unpublishing")
public_model = self.public
if public_model and not dry_run:
self.public = None
self.save()
public_model.delete(mark_for_deletion=False)
return public_model
def _get_public_or_publish(self, *arg, **kw):
# only publish if we don't yet have an id for the
# public model
if self.public:
return self.public
return self.publish(*arg, **kw)
def _get_through_model(self, field_object):
'''
Get the "through" model associated with this field.
        We need to handle things differently for Django 1.1 vs Django 1.2:
        in 1.1 'through' is a string and 'through_model' holds the class,
        while in 1.2 'through' is the class itself.
'''
through = field_object.rel.through
if through:
if isinstance(through, basestring):
return field_object.rel.through_model
return through
return None
def _changes_need_publishing(self):
return self.publish_state == Publishable.PUBLISH_CHANGED or not self.public
def publish_changes(self, dry_run=False, all_published=None, parent=None):
'''
        Publish changes to the model - essentially copy all of its content to another copy in
        the database.
        If you set dry_run=True nothing will be written to the database. Combined with
        the all_published value, this can be used to find out which other models
        would be affected by this function.
'''
assert not self.is_public, "Cannot publish public model - publish should be called from draft model"
assert self.pk is not None, "Please save model before publishing"
# avoid mutual recursion
if all_published is None:
all_published = NestedSet()
if self in all_published:
return all_published.original(self).public
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published)
public_version = self.public
if not public_version:
public_version = self.__class__(is_public=True)
excluded_fields = self.PublishMeta.excluded_fields()
reverse_fields_to_publish = self.PublishMeta.reverse_fields_to_publish()
if self._changes_need_publishing():
# copy over regular fields
for field in self._meta.fields:
if field.name in excluded_fields:
continue
value = getattr(self, field.name)
if isinstance(field, RelatedField):
related = field.rel.to
if issubclass(related, Publishable):
if value is not None:
value = value._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self)
if not dry_run:
publish_function = self.PublishMeta.find_publish_function(field.name, setattr)
publish_function(public_version, field.name, value)
# save the public version and update
# state so we know everything is up-to-date
if not dry_run:
public_version.save()
self.public = public_version
self.publish_state = Publishable.PUBLISH_DEFAULT
self.save(mark_changed=False)
# copy over many-to-many fields
for field in self._meta.many_to_many:
name = field.name
if name in excluded_fields:
continue
m2m_manager = getattr(self, name)
public_objs = list(m2m_manager.all())
field_object, model, direct, m2m = self._meta.get_field_by_name(name)
through_model = self._get_through_model(field_object)
if through_model:
# see if we can work out which reverse relationship this is
# see if we are using our own "through" table or not
if issubclass(through_model, Publishable):
# this will be db name (e.g. with _id on end)
m2m_reverse_name = field_object.m2m_reverse_name()
for reverse_field in through_model._meta.fields:
if reverse_field.column == m2m_reverse_name:
related_name = reverse_field.name
related_field = getattr(through_model, related_name).field
reverse_name = related_field.related.get_accessor_name()
reverse_fields_to_publish.append(reverse_name)
break
continue # m2m via through table won't be dealt with here
related = field_object.rel.to
if issubclass(related, Publishable):
public_objs = [p._get_public_or_publish(dry_run=dry_run, all_published=all_published, parent=self) for p in public_objs]
if not dry_run:
public_m2m_manager = getattr(public_version, name)
old_objs = public_m2m_manager.exclude(pk__in=[p.pk for p in public_objs])
public_m2m_manager.remove(*old_objs)
public_m2m_manager.add(*public_objs)
# one-to-many and one-to-one reverse relations
for obj in self._meta.get_all_related_objects():
if issubclass(obj.model, Publishable):
name = obj.get_accessor_name()
if name in excluded_fields:
continue
if name not in reverse_fields_to_publish:
continue
if obj.field.rel.multiple:
related_items = getattr(self, name).all()
else:
try:
related_items = [getattr(self, name)]
except obj.model.DoesNotExist:
related_items = []
for related_item in related_items:
related_item.publish(dry_run=dry_run, all_published=all_published, parent=self)
# make sure we tidy up anything that needs deleting
if self.public and not dry_run:
if obj.field.rel.multiple:
public_ids = [r.public_id for r in related_items]
deleted_items = getattr(self.public, name).exclude(pk__in=public_ids)
deleted_items.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published)
return public_version
def publish_deletions(self, all_published=None, parent=None, dry_run=False):
'''
actually delete models that have been marked for deletion
'''
if self.publish_state != Publishable.PUBLISH_DELETE:
return
if all_published is None:
all_published = NestedSet()
if self in all_published:
return
all_published.add(self, parent=parent)
self._pre_publish(dry_run, all_published, deleted=True)
for related in self._meta.get_all_related_objects():
if not issubclass(related.model, Publishable):
continue
name = related.get_accessor_name()
if name in self.PublishMeta.excluded_fields():
continue
try:
instances = getattr(self, name).all()
except AttributeError:
instances = [getattr(self, name)]
for instance in instances:
instance.publish_deletions(all_published=all_published, parent=self, dry_run=dry_run)
if not dry_run:
public = self.public
self.delete(mark_for_deletion=False)
if public:
public.delete(mark_for_deletion=False)
self._post_publish(dry_run, all_published, deleted=True)
if getattr(settings, 'TESTING_PUBLISH', False):
# classes to test that publishing etc work ok
from datetime import datetime
class Site(models.Model):
title = models.CharField(max_length=100)
domain = models.CharField(max_length=100)
class FlatPage(Publishable):
url = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
enable_comments = models.BooleanField()
template_name = models.CharField(max_length=70, blank=True)
registration_required = models.BooleanField()
sites = models.ManyToManyField(Site)
class Meta:
ordering = ['url']
def get_absolute_url(self):
if self.is_public:
return self.url
return '%s*' % self.url
class Author(Publishable):
name = models.CharField(max_length=100)
profile = models.TextField(blank=True)
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = ['authorprofile']
class AuthorProfile(Publishable):
author = models.OneToOneField(Author)
extra_profile = models.TextField(blank=True)
class ChangeLog(models.Model):
changed = models.DateTimeField(db_index=True, auto_now_add=True)
message = models.CharField(max_length=200)
class Tag(models.Model):
title = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
# publishable model with a reverse relation to
# page (as a child)
class PageBlock(Publishable):
page=models.ForeignKey('Page')
content = models.TextField(blank=True)
# non-publishable reverse relation to page (as a child)
class Comment(models.Model):
page=models.ForeignKey('Page')
comment = models.TextField()
def update_pub_date(page, field_name, value):
# ignore value entirely and replace with now
setattr(page, field_name, update_pub_date.pub_date)
update_pub_date.pub_date = datetime.now()
class Page(Publishable):
slug = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
pub_date = models.DateTimeField(default=datetime.now)
parent = models.ForeignKey('self', blank=True, null=True)
authors = models.ManyToManyField(Author, blank=True)
log = models.ManyToManyField(ChangeLog, blank=True)
tags = models.ManyToManyField(Tag, through='PageTagOrder', blank=True)
class Meta:
ordering = ['slug']
class PublishMeta(Publishable.PublishMeta):
publish_exclude_fields = ['log']
publish_reverse_fields = ['pageblock_set']
publish_functions = { 'pub_date': update_pub_date }
def get_absolute_url(self):
if not self.parent:
return u'/%s/' % self.slug
return '%s%s/' % (self.parent.get_absolute_url(), self.slug)
class PageTagOrder(Publishable):
# note these are named in non-standard way to
# ensure we are getting correct names
tagged_page=models.ForeignKey(Page)
page_tag=models.ForeignKey(Tag)
tag_order=models.IntegerField()
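# Illustrative usage sketch (not part of the original module): the basic
# publish workflow against the TESTING_PUBLISH models above. It assumes a
# configured Django environment with these models installed.
def _demo_publish_workflow():
    draft = Page.objects.create(slug='home', title='Home')   # save() marks it as changed
    # dry_run writes nothing; all_published collects what *would* be published
    would_publish = NestedSet()
    draft.publish(dry_run=True, all_published=would_publish)
    # a real publish creates/updates the public counterpart and links it via .public
    public = draft.publish()
    print(public.is_public, draft.public == public)           # -> True True
    # marking the draft deleted and publishing again removes the public copy
    draft.delete()                                            # mark_for_deletion=True by default
    draft.publish()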
|
get_text_width
|
Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
We try first with ``wcswidth``, and fall back to iterating over each
character and using ``wcwidth`` individually, falling back to a value of 0
for non-printable wide characters.
On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
which in the case of Ansible is done in ``bin/ansible``.
|
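The masked implementation is not reproduced here; as a rough, illustrative sketch of the approach the docstring describes (try ``wcswidth`` over the whole string first, then fall back to per-character ``wcwidth``, counting non-printable characters as zero columns), something along these lines would work. It is not the actual Ansible implementation, and it leans on the ``_LIBC``/``_MAX_INT`` ctypes handles and the ``text_type`` import defined further down in this file.
def get_text_width_sketch(text):
    """Illustrative sketch only -- not the masked Ansible implementation."""
    if not isinstance(text, text_type):
        raise TypeError('get_text_width expects a unicode string')
    # Whole-string attempt first; wcswidth returns -1 if the string contains
    # a non-printable character.
    width = _LIBC.wcswidth(text, _MAX_INT)
    if width != -1:
        return width
    # Per-character fallback: non-printable characters (wcwidth == -1)
    # contribute zero columns.
    width = 0
    for c in text:
        width += max(_LIBC.wcwidth(c), 0)
    return width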
# (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import splatlog as logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
try:
# Python 2
input = raw_input
except NameError:
# Python 3, we already have raw_input
pass
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the users default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
# MASKED: get_text_width function (lines 79-144)
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(with_metaclass(Singleton, object)):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_WHITELIST and any(C.ANSIBLE_COW_WHITELIST):
self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
if sys.version_info >= (3,):
# Convert back to text string on python3
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
''' used to print out a deprecation message.'''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-base'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
|
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
    We try first with ``wcswidth``, and fall back to iterating over each
    character and using ``wcwidth`` individually, defaulting to a width of 0
    for non-printable wide characters.
    On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
    which in the case of Ansible is done in ``bin/ansible``.
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
# A few characters result in a subtraction of length:
# BS, DEL, CCH, ESC
# ESC is slightly different in that it's part of an escape sequence, and
# while ESC is non printable, it's part of an escape sequence, which results
# in a single non printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
| 79 | 144 |
# (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
try:
# Python 2
input = raw_input
except NameError:
    # Python 3: input() already provides raw_input's behavior
pass
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the users default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
    We try first with ``wcswidth``, and fall back to iterating over each
    character and using ``wcwidth`` individually, defaulting to a width of 0
    for non-printable wide characters.
    On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
    which in the case of Ansible is done in ``bin/ansible``.
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
# A few characters result in a subtraction of length:
# BS, DEL, CCH, ESC
# ESC is slightly different in that it's part of an escape sequence, and
# while ESC is non printable, it's part of an escape sequence, which results
# in a single non printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(with_metaclass(Singleton, object)):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_WHITELIST and any(C.ANSIBLE_COW_WHITELIST):
self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
if sys.version_info >= (3,):
# Convert back to text string on python3
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
''' used to print out a deprecation message.'''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-base'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
|
feature_evaluation
|
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
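For reference, a minimal sketch of the Pearson correlation reported in each plot title (illustrative data and variable names; ``numpy.corrcoef`` is used as the canonical computation):
    import numpy as np
    rng = np.random.default_rng(0)
    feature = rng.normal(size=200)
    response = 3.0 * feature + rng.normal(scale=0.5, size=200)
    # Pearson correlation = cov(feature, response) / (std(feature) * std(response))
    pearson = np.corrcoef(feature, response)[0, 1]
    print(round(pearson, 3))  # close to 1.0 for this strongly linear toy relationship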
|
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics import *
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df = df.dropna()
df = df[df['price'] > 0]
df = df[df['yr_built'] > 0]
df = df[df['bedrooms'] < 20]
df['date'] = df['date'].apply(lambda x: int(str(x)[:4]))
df = df[df['sqft_living'] <= df['sqft_lot']]
labels_to_drop = ['zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
df.drop(columns=labels_to_drop, inplace=True)
series = df.pop('price')
return (df, series)
# MASKED: feature_evaluation function (lines 41-72)
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df, series = load_data('../datasets/house_prices.csv')
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df,series,'C:/Users/shahaf/Documents')
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(df, series, 0.75)
# Question 4 - Fit model over increasing percentages of the overall
# training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10
# times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon
# of size (mean-2*std, mean+2*std)
training_size = []
average_loss = []
var = []
for p in range(10, 101):
training_size.append(p / 100)
mse_list = []
for i in range(0, 10):
train_sample = train_X.sample(frac=p / 100)
sample_X, sample_y = train_sample, train_y.loc[
train_sample.index]
# model
model = LinearRegression()
model.fit(sample_X, sample_y)
mse_list.append(model.loss(test_X, test_y))
mse_arr = np.array(mse_list)
average_loss.append(mse_arr.mean())
var.append(mse_arr.std())
var = np.array(var)
average_loss = np.array(average_loss)
fig = go.Figure()
fig.add_trace(go.Scatter(x=training_size, y=average_loss,
mode="markers+lines",
marker=dict(color="LightSeaGreen"),name="Mean "
"MSE"))
fig.add_trace(go.Scatter(
x=training_size, y=average_loss - 2 * var, mode="lines", line=dict(
color="Aquamarine"),name="-2*STD"))
fig.add_trace(
go.Scatter(x=training_size,
y=average_loss + 2 * var, mode="lines", fill='tonexty',
line=dict(
color="Aquamarine"),name="+2*STD"))
    fig.update_layout(title="Mean MSE vs Percentage of Samples for "
"Fitting")
fig.show()
|
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
    correlations = []
    features = list(X)
    for feature in features:
        cov = np.cov(y, X[feature])
        std = np.std(X[feature]) * np.std(y)
        pearson_correlation = cov[0][1] / std
        correlations.append(pearson_correlation)
fig = go.Figure()
fig.add_trace(go.Scatter(x=X[feature], y=y, mode="markers"))
fig.update_layout(title=feature + " - Pearson Correlation = "
+ str(pearson_correlation),
xaxis_title=feature + " Feature values",
yaxis_title="House's Price")
        fig.write_image(f"{output_path}/{feature}.png", format="png")
| 41 | 72 |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics import *
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df = df.dropna()
df = df[df['price'] > 0]
df = df[df['yr_built'] > 0]
df = df[df['bedrooms'] < 20]
df['date'] = df['date'].apply(lambda x: int(str(x)[:4]))
df = df[df['sqft_living'] <= df['sqft_lot']]
labels_to_drop = ['zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15']
df.drop(columns=labels_to_drop, inplace=True)
series = df.pop('price')
return (df, series)
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
    correlations = []
    features = list(X)
    for feature in features:
        cov = np.cov(y, X[feature])
        std = np.std(X[feature]) * np.std(y)
        pearson_correlation = cov[0][1] / std
        correlations.append(pearson_correlation)
fig = go.Figure()
fig.add_trace(go.Scatter(x=X[feature], y=y, mode="markers"))
fig.update_layout(title=feature + " - Pearson Correlation = "
+ str(pearson_correlation),
xaxis_title=feature + " Feature values",
yaxis_title="House's Price")
        fig.write_image(f"{output_path}/{feature}.png", format="png")
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
df, series = load_data('../datasets/house_prices.csv')
# Question 2 - Feature evaluation with respect to response
feature_evaluation(df,series,'C:/Users/shahaf/Documents')
# Question 3 - Split samples into training- and testing sets.
train_X, train_y, test_X, test_y = split_train_test(df, series, 0.75)
# Question 4 - Fit model over increasing percentages of the overall
# training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10
# times:
# 1) Sample p% of the overall training data
# 2) Fit linear model (including intercept) over sampled set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon
# of size (mean-2*std, mean+2*std)
training_size = []
average_loss = []
var = []
for p in range(10, 101):
training_size.append(p / 100)
mse_list = []
for i in range(0, 10):
train_sample = train_X.sample(frac=p / 100)
sample_X, sample_y = train_sample, train_y.loc[
train_sample.index]
# model
model = LinearRegression()
model.fit(sample_X, sample_y)
mse_list.append(model.loss(test_X, test_y))
mse_arr = np.array(mse_list)
average_loss.append(mse_arr.mean())
var.append(mse_arr.std())
var = np.array(var)
average_loss = np.array(average_loss)
fig = go.Figure()
fig.add_trace(go.Scatter(x=training_size, y=average_loss,
mode="markers+lines",
marker=dict(color="LightSeaGreen"),name="Mean "
"MSE"))
fig.add_trace(go.Scatter(
x=training_size, y=average_loss - 2 * var, mode="lines", line=dict(
color="Aquamarine"),name="-2*STD"))
fig.add_trace(
go.Scatter(x=training_size,
y=average_loss + 2 * var, mode="lines", fill='tonexty',
line=dict(
color="Aquamarine"),name="+2*STD"))
    fig.update_layout(title="Mean MSE vs Percentage of Samples for "
"Fitting")
fig.show()
|
get
|
Get an existing FirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
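A hedged usage sketch (must run inside a Pulumi program; the import path and the resource ID below are illustrative placeholders, not values taken from this file):
    import pulumi
    from pulumi_azure_nextgen.sql import FirewallRule  # assumed module path
    # Look up an existing rule by its Azure resource ID instead of creating a new one.
    existing = FirewallRule.get(
        "existing-rule",
        id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Sql/servers/<srv>/firewallRules/<rule>",
    )
    pulumi.export("startIpAddress", existing.start_ip_address)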
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['FirewallRule']
class FirewallRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
end_ip_address: Optional[pulumi.Input[str]] = None,
firewall_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
start_ip_address: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a server firewall rule.
API Version: 2014-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] end_ip_address: The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
:param pulumi.Input[str] firewall_rule_name: The name of the firewall rule.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[str] start_ip_address: The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if end_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'end_ip_address'")
__props__['end_ip_address'] = end_ip_address
__props__['firewall_rule_name'] = firewall_rule_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__['server_name'] = server_name
if start_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'start_ip_address'")
__props__['start_ip_address'] = start_ip_address
__props__['kind'] = None
__props__['location'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/latest:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20140401:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20150501preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:FirewallRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(FirewallRule, __self__).__init__(
'azure-nextgen:sql:FirewallRule',
resource_name,
__props__,
opts)
# MASKED: get function (lines 80-96)
@property
@pulumi.getter(name="endIpAddress")
def end_ip_address(self) -> pulumi.Output[str]:
"""
The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "end_ip_address")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of server that contains this firewall rule.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Location of the server that contains this firewall rule.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startIpAddress")
def start_ip_address(self) -> pulumi.Output[str]:
"""
The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "start_ip_address")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':
"""
Get an existing FirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return FirewallRule(resource_name, opts=opts, __props__=__props__)
| 80 | 96 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['FirewallRule']
class FirewallRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
end_ip_address: Optional[pulumi.Input[str]] = None,
firewall_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
start_ip_address: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a server firewall rule.
API Version: 2014-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] end_ip_address: The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
:param pulumi.Input[str] firewall_rule_name: The name of the firewall rule.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[str] start_ip_address: The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if end_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'end_ip_address'")
__props__['end_ip_address'] = end_ip_address
__props__['firewall_rule_name'] = firewall_rule_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__['server_name'] = server_name
if start_ip_address is None and not opts.urn:
raise TypeError("Missing required property 'start_ip_address'")
__props__['start_ip_address'] = start_ip_address
__props__['kind'] = None
__props__['location'] = None
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/latest:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20140401:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20150501preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:FirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:FirewallRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(FirewallRule, __self__).__init__(
'azure-nextgen:sql:FirewallRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FirewallRule':
"""
Get an existing FirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return FirewallRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="endIpAddress")
def end_ip_address(self) -> pulumi.Output[str]:
"""
The end IP address of the firewall rule. Must be IPv4 format. Must be greater than or equal to startIpAddress. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "end_ip_address")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of server that contains this firewall rule.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Location of the server that contains this firewall rule.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startIpAddress")
def start_ip_address(self) -> pulumi.Output[str]:
"""
The start IP address of the firewall rule. Must be IPv4 format. Use value '0.0.0.0' to represent all Azure-internal IP addresses.
"""
return pulumi.get(self, "start_ip_address")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
evaluate
|
Evaluate a fitted classifier on test data.
Parameters:
    estimator: Fitted estimator.
    X: Input test data.
    y: Labels for test data.
Returns:
    None. Writes a classification report and an ROC curve to the Streamlit app.
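For context, a tiny self-contained sketch of the ``roc_curve``/``auc`` calls this helper builds on (toy labels and scores, not data from this project):
    from sklearn.metrics import roc_curve, auc
    y_true = [0, 0, 1, 1]
    y_score = [0.1, 0.4, 0.35, 0.8]
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    print(auc(fpr, tpr))  # 0.75 for this toy example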
|
import matplotlib.pyplot as plt, streamlit as st
from typing import Iterable, Union
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, RocCurveDisplay
def train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
Train custom classifier model.
Parameters:
estimator: Unfitted estimator.
X: Input training data.
        y: Labels for training data.
Returns:
Fitted estimator model.
"""
return estimator.fit(X=X, y=y)
def classify(estimator: object, X: Iterable[Union[int, float]]):
"""
Predict with custom classifier model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
Returns:
Predicted labels.
"""
return estimator.predict(X=X)
def regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
Predict with custom regressor model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
y: Labels for test data.
Returns:
Predicted labels.
"""
pass
# MASKED: evaluate function (lines 49-78)
|
def evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
    Evaluate a fitted classifier on test data.
    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.
    Returns:
        None. Writes a classification report and an ROC curve to the Streamlit app.
"""
pred = estimator.predict(X=X)
# classification report
report = classification_report(y_true=y, y_pred=pred)
st.write('Classification Report')
st.write(report)
# ROC curve
fpr, tpr, thresholds = roc_curve(y, pred)
roc_auc = auc(fpr, tpr)
    display = RocCurveDisplay(
        fpr=fpr,
        tpr=tpr,
        roc_auc=roc_auc,
        estimator_name=type(estimator).__name__
    )
    display.plot()
    st.pyplot(fig=display.figure_)
| 49 | 78 |
import matplotlib.pyplot as plt, streamlit as st
from typing import Iterable, Union
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, RocCurveDisplay
def train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
Train custom classifier model.
Parameters:
estimator: Unfitted estimator.
X: Input training data.
        y: Labels for training data.
Returns:
Fitted estimator model.
"""
return estimator.fit(X=X, y=y)
def classify(estimator: object, X: Iterable[Union[int, float]]):
"""
Predict with custom classifier model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
Returns:
Predicted labels.
"""
return estimator.predict(X=X)
def regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
Predict with custom regressor model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
y: Labels for test data.
Returns:
Predicted labels.
"""
pass
def evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
"""
    Evaluate a fitted classifier on test data.
    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.
    Returns:
        None. Writes a classification report and an ROC curve to the Streamlit app.
"""
pred = estimator.predict(X=X)
# classification report
report = classification_report(y_true=y, y_pred=pred)
st.write('Classification Report')
st.write(report)
# ROC curve
fpr, tpr, thresholds = roc_curve(y, pred)
roc_auc = auc(fpr, tpr)
    display = RocCurveDisplay(
        fpr=fpr,
        tpr=tpr,
        roc_auc=roc_auc,
        estimator_name=type(estimator).__name__
    )
    display.plot()
    st.pyplot(fig=display.figure_)
|
fit_shifts
|
Fits (non-iteratively and without sigma-clipping) a displacement
transformation only between input lists of positions ``xy`` and ``uv``.
When weights are provided, a weighted fit is performed. Parameter
descriptions and return values are identical to those in `iter_linear_fit`,
except that the returned ``fit`` dictionary does not contain the following
keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
``'eff_nclip'``.
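As a quick illustration (a sketch of the underlying math, not this module's code): for a displacement-only fit, the weighted least-squares solution is simply the weighted mean of the coordinate differences, since minimizing sum_k w_k*||xy_k - (uv_k + s)||^2 over s gives s = sum_k w_k*(xy_k - uv_k) / sum_k w_k.
    import numpy as np
    xy = np.array([[10.2, 5.1], [3.9, 7.8], [6.0, 1.2]])
    uv = np.array([[10.0, 5.0], [3.7, 7.7], [5.8, 1.0]])
    w = np.array([1.0, 2.0, 1.0])  # per-point weights (illustrative values)
    shift = np.average(xy - uv, axis=0, weights=w)  # least-squares displacement
    resids = xy - (uv + shift)
    rmse = np.sqrt(np.average(np.sum(resids ** 2, axis=1), weights=w))
    print(shift, rmse)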
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module that provides algorithms for performing linear fit between
sets of 2D points.
:Authors: Mihai Cara, Warren Hack
:License: :doc:`../LICENSE`
"""
import logging
import numbers
import numpy as np
from .linalg import inv
from . import __version__ # noqa: F401
__author__ = 'Mihai Cara, Warren Hack'
__all__ = ['iter_linear_fit', 'build_fit_matrix']
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class SingularMatrixError(Exception):
""" An error class used to report when a singular matrix is encountered."""
pass
class NotEnoughPointsError(Exception):
"""
An error class used to report when there are not enough points to
find parameters of a linear transformation.
"""
pass
def iter_linear_fit(xy, uv, wxy=None, wuv=None,
fitgeom='general', center=None,
nclip=3, sigma=(3.0, 'rmse'), clip_accum=False):
r"""
Compute linear transformation parameters that "best" (in the sense of
minimizing residuals) transform ``uv`` source position to ``xy``
sources iteratively using sigma-clipping.
    More precisely, this function attempts to find a ``2x2`` matrix ``F`` and
a shift vector ``s`` that minimize the residuals between the *transformed*
reference source coordinates ``uv``
.. math::
\mathbf{xy}'_k = \mathbf{F}\cdot(\mathbf{uv}_k-\mathbf{c})+\
\mathbf{s} + \mathbf{c}
:label: ilf1
and the "observed" source positions ``xy``:
.. math::
\epsilon^2 = \Sigma_k w_k \|\mathbf{xy}_k-\mathbf{xy}'_k\|^2.
:label: ilf2
In the above equations, :math:`\mathbf{F}` is a ``2x2`` matrix while
:math:`\mathbf{xy}_k` and :math:`\mathbf{uv}_k` are the position
coordinates of the ``k``-th source (row in input ``xy`` and ``uv`` arrays).
One of the two catalogs (``xy`` or ``uv``) contains what we refer to as
"image" source positions and the other one as "reference" source positions.
The meaning assigned to ``xy`` and ``uv`` parameters are up to the
caller of this function.
Parameters
----------
xy: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line).
uv: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line). This array *must have* the same length (shape)
as the ``xy`` array.
wxy: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
        will contribute equally to the fit if ``wuv`` is also set to `None`.
See ``Notes`` section for more details.
wuv: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
        will contribute equally to the fit if ``wxy`` is also set to `None`.
See ``Notes`` section for more details.
fitgeom: {'shift', 'rscale', 'general'}, optional
The fitting geometry to be used in fitting the matched object lists.
This parameter is used in fitting the shifts (offsets), rotations
and/or scale changes from the matched object lists. The 'general'
fit geometry allows for independent scale and rotation for each axis.
center: tuple, list, numpy.ndarray, None, optional
A list-like container with two ``X``- and ``Y``-positions of the center
(origin) of rotations in the ``uv`` and ``xy`` coordinate frames.
If not provided, ``center`` is estimated as a (weighted) mean position
in the ``uv`` frame.
nclip: int, None, optional
Number (a non-negative integer) of clipping iterations in fit.
Clipping will be turned off if ``nclip`` is either `None` or 0.
sigma: float, tuple of the form (float, str), optional
When a tuple is provided, first value (a positive number)
indicates the number of "fit error estimates" to use for clipping.
The second value (a string) indicates the statistic to be
used for "fit error estimate". Currently the following values are
supported: ``'rmse'``, ``'mae'``, and ``'std'``
- see ``Notes`` section for more details.
When ``sigma`` is a single number, it must be a positive number and
the default error estimate ``'rmse'`` is assumed.
This parameter is ignored when ``nclip`` is either `None` or 0.
clip_accum: bool, optional
Indicates whether or not to reset the list of "bad" (clipped out)
sources after each clipping iteration. When set to `True` the list
only grows with each iteration as "bad" positions never re-enter the
pool of available positions for the fit. By default the list of
"bad" source positions is purged at each iteration.
Returns
-------
fit: dict
- ``'shift'``: A ``numpy.ndarray`` with two components of the
computed shift.
- ``'shift_ld'``: A ``numpy.ndarray`` with two components of the
computed shift of type ``numpy.longdouble``.
- ``'matrix'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix.
- ``'matrix_ld'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix of type ``numpy.longdouble``.
- ``'proper_rot'``: Rotation angle (degree) as if the rotation is
proper.
- ``'rot'``: A tuple of ``(rotx, roty)`` - the rotation angles with
regard to the ``X`` and ``Y`` axes.
- ``'<rot>'``: *Arithmetic mean* of the angles of rotation around
``X`` and ``Y`` axes.
- ``'scale'``: A tuple of ``(sx, sy)`` - scale change in the direction
of the ``X`` and ``Y`` axes.
- ``'<scale>'``: *Geometric mean* of scales ``sx`` and ``sy``.
- ``'skew'``: Computed skew.
- ``'proper'``: a boolean indicating whether the rotation is proper.
- ``'fitgeom'``: Fit geometry (allowed transformations) used for
fitting data (to minimize residuals). This is a copy of the input
argument ``fitgeom``.
- ``'center'``: Center of rotation
- ``'center_ld'``: Center of rotation as a ``numpy.longdouble``.
- ``'fitmask'``: A boolean array indicating which source positions
were used for fitting (`True`) and which were clipped out
(`False`). **NOTE** For weighted fits, positions with zero
weights are automatically excluded from the fits.
- ``'eff_nclip'``: Effective number of clipping iterations
- ``'rmse'``: Root-Mean-Square Error
- ``'mae'``: Mean Absolute Error
- ``'std'``: Standard Deviation of the residuals
- ``'resids'``: An array of residuals of the fit.
**NOTE:** Only the residuals for the "valid" points are reported
here. Therefore the length of this array may be smaller than the
length of input arrays of positions.
Notes
-----
**Weights**
Weights can be provided for both "image" source positions and "reference"
source positions. When no weights are given, all positions are weighted
equally. When only one set of positions has weights (i.e., either ``wxy``
or ``wuv`` is not `None`) then weights in :eq:`ilf2` are set to be equal
to the provided set of weights. When weights for *both* "image" source
positions and "reference" source positions are provided, then the
combined weight that is used in :eq:`ilf2` is computed as:
.. math::
1/w = 1/w_{xy} + 1/w_{uv}.
**Statistics for clipping**
Several statistics are available for clipping iterations and all of them
are reported in the returned ``fit`` dictionary regardless of the
setting in ``sigma``:
.. math::
\mathrm{RMSE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|^2}
.. math::
\mathrm{MAE} = \Sigma_k w_k \|\mathbf{r}_k\|
.. math::
\mathrm{STD} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k - \
\mathbf{\overline{r}}\|^2/(1-V_2)}
where :math:`\mathbf{r}_k=\mathbf{xy}_k-\mathbf{xy}'_k`,
:math:`\Sigma_k w_k = 1`, and :math:`V_2=\Sigma_k w_k^2`.
"""
if fitgeom == 'general':
linear_fit = fit_general
elif fitgeom == 'rscale':
linear_fit = fit_rscale
elif fitgeom == 'shift':
linear_fit = fit_shifts
else:
raise ValueError("Unsupported 'fitgeom' value: '{}'".format(fitgeom))
minobj_per_fitgeom = {'shift': 1, 'rscale': 2, 'general': 3}
minobj = minobj_per_fitgeom[fitgeom]
xy = np.array(xy, dtype=np.longdouble)
uv = np.array(uv, dtype=np.longdouble)
if len(xy.shape) != 2 or xy.shape[1] != 2 or uv.shape != xy.shape:
raise ValueError("Input coordinate arrays 'xy' and 'uv' must be of "
"shape (N, 2) where N is the number of coordinate "
"points.")
wmask = np.ones(len(xy), dtype=np.bool_)
if wxy is not None:
wxy = np.asarray(wxy)
if len(wxy.shape) != 1 or wxy.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wxy' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wxy > 0.0
if wuv is not None:
wuv = np.asarray(wuv)
if len(wuv.shape) != 1 or wuv.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wuv' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wuv > 0.0
mask = wmask
if sigma is None and nclip is not None and nclip > 0:
raise ValueError("Argument 'sigma' cannot be None when 'nclip' is "
"a positive number.")
if isinstance(sigma, numbers.Number):
sigstat = 'rmse' # default value
nsigma = float(sigma)
elif sigma is not None:
nsigma = float(sigma[0])
sigstat = sigma[1]
if sigstat not in ['rmse', 'mae', 'std']:
raise ValueError("Unsupported sigma statistics value.")
if sigma is not None and nsigma <= 0.0:
raise ValueError("The value of sigma for clipping iterations must be "
"positive.")
if nclip is None:
nclip = 0
else:
if nclip < 0:
raise ValueError("Argument 'nclip' must be non-negative.")
nclip = int(nclip)
if np.count_nonzero(mask) == minobj:
log.warning("The number of sources for the fit is smaller than the "
"minimum number of sources necessary for the requested "
"'fitgeom'.")
log.warning("Resetting number of clipping iterations to 0.")
nclip = 0
if center is None:
center_ld = uv[mask].mean(axis=0, dtype=np.longdouble)
center = center_ld.astype(np.double)
else:
center_ld = np.longdouble(center)
xy[mask] -= center_ld
uv[mask] -= center_ld
log.info("Performing '{:s}' fit".format(fitgeom))
# initial fit:
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
# clipping iterations:
effective_nclip = 0
for n in range(nclip):
resids = fit['resids']
# redefine what pixels will be included in next iteration
cutoff = nsigma * fit[sigstat]
nonclipped = np.linalg.norm(resids, axis=1) < cutoff
if np.count_nonzero(nonclipped) < minobj or nonclipped.all():
break
effective_nclip += 1
prev_mask = mask
if not clip_accum:
mask = np.array(wmask)
mask[prev_mask] *= nonclipped
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
fit['center'] = center
fit['center_ld'] = center_ld
fit['fitmask'] = mask
fit['eff_nclip'] = effective_nclip
return fit
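# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, made-up example of calling `iter_linear_fit` to recover a known
# offset between two small synthetic catalogs; the coordinates below are
# hypothetical and clipping is disabled for brevity.
#
#     import numpy as np
#     uv = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0], [10.0, 10.0]])
#     xy = uv + [2.0, -1.5]                      # shifted copy of `uv`
#     fit = iter_linear_fit(xy, uv, fitgeom='shift', nclip=0)
#     # fit['shift'] is approximately [2.0, -1.5] and fit['rmse'] is ~0;
#     # fit['fitmask'] marks all four positions as used in the fit.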
def _compute_stat(fit, residuals, weights):
if weights is None:
fit['rmse'] = float(np.sqrt(np.mean(2 * residuals**2)))
fit['mae'] = float(np.mean(np.linalg.norm(residuals, axis=1)))
fit['std'] = float(np.linalg.norm(residuals.std(axis=0)))
else:
# assume all weights > 0 (this should be ensured by the caller => no
# need to repeat the check here)
npts = len(weights)
wt = np.sum(weights)
if npts == 0 or wt == 0.0:
fit['rmse'] = float('nan')
fit['mae'] = float('nan')
fit['std'] = float('nan')
return
w = weights / wt
fit['rmse'] = float(np.sqrt(np.sum(np.dot(w, residuals**2))))
fit['mae'] = float(np.dot(w, np.linalg.norm(residuals, axis=1)))
if npts == 1:
fit['std'] = 0.0
else:
# see:
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
wmean = np.dot(w, residuals)
fit['std'] = float(
np.sqrt(np.sum(np.dot(w, (residuals - wmean)**2) /
(1.0 - np.sum(w**2))))
)
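# --- Illustrative sketch (not part of the original module) ---
# A quick, made-up check of the unweighted branch of `_compute_stat`: RMSE is
# the root of the mean squared residual norm, MAE is the mean residual norm.
#
#     import numpy as np
#     r = np.array([[1.0, 0.0], [0.0, 2.0], [-1.0, 0.0]])
#     stats = {}
#     _compute_stat(stats, residuals=r, weights=None)
#     # stats['rmse'] == sqrt((1 + 4 + 1) / 3); stats['mae'] == (1 + 2 + 1) / 3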
# MASKED: fit_shifts function (lines 356-412)
# Implementation of geomap 'rscale' fitting based on 'lib/geofit.x'
# by Warren Hack. Support for axis flips added by Mihai Cara.
def fit_rscale(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation and scale transformations between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 2:
raise NotEnoughPointsError(
"At least two points are required to find shifts, rotation, and "
"scale."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
xm = np.mean(x)
ym = np.mean(y)
um = np.mean(u)
vm = np.mean(v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(u, u)
sv2 = np.dot(v, v)
sxv = np.dot(x, v)
syu = np.dot(y, u)
sxu = np.dot(x, u)
syv = np.dot(y, v)
su2v2 = su2 + sv2
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 2:
raise ValueError("Not enough valid data for 'rscale' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
xm = np.dot(w, x)
ym = np.dot(w, y)
um = np.dot(w, u)
vm = np.dot(w, v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(w, u**2)
sv2 = np.dot(w, v**2)
sxv = np.dot(w, x * v)
syu = np.dot(w, y * u)
sxu = np.dot(w, x * u)
syv = np.dot(w, y * v)
su2v2 = su2 + sv2
det = sxu * syv - sxv * syu
if det < 0:
rot_num = sxv + syu
rot_denom = sxu - syv
else:
rot_num = sxv - syu
rot_denom = sxu + syv
if rot_num == rot_denom:
theta = 0.0
else:
theta = np.rad2deg(np.arctan2(rot_num, rot_denom))
if theta < 0:
theta += 360.0
ctheta = np.cos(np.deg2rad(theta))
stheta = np.sin(np.deg2rad(theta))
s_num = rot_denom * ctheta + rot_num * stheta
if su2v2 > 0.0:
mag = s_num / su2v2
else:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
if det < 0:
# "flip" y-axis (reflection about x-axis *after* rotation)
# NOTE: keep in mind that 'matrix' is the transposed rotation matrix.
sthetax = -mag * stheta
cthetay = -mag * ctheta
else:
sthetax = mag * stheta
cthetay = mag * ctheta
cthetax = mag * ctheta
sthetay = mag * stheta
sdet = np.sign(det)
xshift = xm - um * cthetax - sdet * vm * sthetax
yshift = ym + sdet * um * sthetay - vm * cthetay
p = np.array([cthetax, sthetay, xshift], dtype=np.longdouble)
q = np.array([-sthetax, cthetay, yshift], dtype=np.longdouble)
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, fitgeom='rscale')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
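# --- Illustrative usage sketch (not part of the original module) ---
# A made-up example of `fit_rscale` recovering a pure rotation and scale.
# The matrix below follows the same convention as `build_fit_matrix`.
#
#     import numpy as np
#     uv = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
#     t = np.deg2rad(30.0)
#     m = 2.0 * np.array([[np.cos(t), np.sin(t)],
#                         [-np.sin(t), np.cos(t)]])   # scale 2, rotation 30 deg
#     xy = np.dot(uv, m.T)
#     fit = fit_rscale(xy, uv)
#     # fit['<scale>'] ~ 2.0, fit['proper_rot'] ~ 30.0, fit['proper'] is True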
def fit_general(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation, scale, and skew transformations (i.e., the full ``2x2``
transformation matrix) between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 3:
raise NotEnoughPointsError(
"At least three points are required to find 6-parameter linear "
"affine transformations."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
# Set up products used for computing the fit
sw = float(x.size)
sx = x.sum()
sy = y.sum()
su = u.sum()
sv = v.sum()
sxu = np.dot(x, u)
syu = np.dot(y, u)
sxv = np.dot(x, v)
syv = np.dot(y, v)
suu = np.dot(u, u)
svv = np.dot(v, v)
suv = np.dot(u, v)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 3:
raise ValueError("Not enough valid data for 'general' fit: "
"too many weights are zero!")
# Set up products used for computing the fit
sw = np.sum(w, dtype=np.longdouble)
sx = np.dot(w, x)
sy = np.dot(w, y)
su = np.dot(w, u)
sv = np.dot(w, v)
sxu = np.dot(w, x * u)
syu = np.dot(w, y * u)
sxv = np.dot(w, x * v)
syv = np.dot(w, y * v)
suu = np.dot(w, u * u)
svv = np.dot(w, v * v)
suv = np.dot(w, u * v)
m = np.array([[su, sv, sw], [suu, suv, su], [suv, svv, sv]],
dtype=np.longdouble)
a = np.array([sx, sxu, sxv], dtype=np.longdouble)
b = np.array([sy, syu, syv], dtype=np.longdouble)
try:
inv_m = inv(m)
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
p = np.dot(inv_m, a)
q = np.dot(inv_m, b)
if not (np.all(np.isfinite(p)) and np.all(np.isfinite(q))):
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
) # pragma: no cover
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, 'general')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
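# --- Illustrative usage sketch (not part of the original module) ---
# A made-up example of `fit_general` recovering an affine transformation with
# different scales along the two axes plus a shift.
#
#     import numpy as np
#     uv = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
#     xy = uv * [2.0, 3.0] + [1.0, -2.0]
#     fit = fit_general(xy, uv)
#     # fit['matrix'] ~ [[2, 0], [0, 3]], fit['shift'] ~ [1, -2],
#     # fit['scale'] ~ (2.0, 3.0) and fit['skew'] ~ 0.0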
def _build_fit(p, q, fitgeom):
# Build fit matrix:
fit_matrix = np.vstack((p[:2], q[:2]))
# determinant of the transformation
det = p[0] * q[1] - p[1] * q[0]
sdet = np.sign(det)
proper = sdet >= 0
# Create a working copy (no reflections) for computing transformation
# parameters (scale, rotation angle, skew):
wfit = fit_matrix.copy()
# Skew is zero for all fitgeom except 'general':
skew = 0.0
if fitgeom == 'shift':
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': 0.0,
'rot': (0.0, 0.0),
'<rot>': 0.0,
'scale': (1.0, 1.0),
'<scale>': 1.0,
'skew': 0.0,
'proper': proper,
'fitgeom': 'shift'
}
return fit
# Compute average scale:
s = np.sqrt(np.abs(det))
# Compute scales for each axis:
if fitgeom == 'general':
sx, sy = np.sqrt(p[:2]**2 + q[:2]**2)
else:
sx = s
sy = s
# Remove scale from the transformation matrix:
wfit[:, 0] /= sx
wfit[:, 1] /= sy
# Compute rotation angle as if we have a proper rotation.
# This will also act as *some sort* of "average rotation" even for
# transformations with different rot_x and rot_y:
prop_rot = np.rad2deg(
np.arctan2(wfit[0, 1] - sdet * wfit[1, 0],
wfit[0, 0] + sdet * wfit[1, 1])
)
if proper and fitgeom == 'rscale':
rotx = prop_rot
roty = prop_rot
rot = prop_rot
else:
rotx = np.rad2deg(np.arctan2(-wfit[1, 0], wfit[0, 0]))
roty = np.rad2deg(np.arctan2(wfit[0, 1], wfit[1, 1]))
rot = 0.5 * (rotx + roty)
skew = np.mod(roty - rotx - 180.0, 360.0) - 180.0
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': float(prop_rot),
'rot': (float(rotx), float(roty)),
'<rot>': float(rot),
'scale': (float(sx), float(sy)),
'<scale>': float(s),
'skew': float(skew),
'proper': proper,
'fitgeom': fitgeom
}
return fit
def build_fit_matrix(rot, scale=1):
r"""
Create an affine transformation matrix (2x2) from the provided rotation
angle(s) and scale(s):
.. math::
M = \begin{bmatrix}
s_x \cos(\theta_x) & s_y \sin(\theta_y) \\
-s_x \sin(\theta_x) & s_y \cos(\theta_y)
\end{bmatrix}
Parameters
----------
rot: tuple, float, optional
Rotation angle in degrees. Two values (one for each axis) can be
provided as a tuple.
scale: tuple, float, optional
Scale of the linear transformation. Two values (one for each axis)
can be provided as a tuple.
Returns
-------
matrix: numpy.ndarray
A 2x2 `numpy.ndarray` containing coefficients of a linear
transformation.
"""
if hasattr(rot, '__iter__'):
rx, ry = map(np.deg2rad, rot)
else:
rx = ry = np.deg2rad(float(rot))
if hasattr(scale, '__iter__'):
sx, sy = scale
else:
sx = sy = float(scale)
matrix = np.array([[sx * np.cos(rx), sy * np.sin(ry)],
[-sx * np.sin(rx), sy * np.cos(ry)]])
return matrix
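# --- Illustrative usage sketch (not part of the original module) ---
# Made-up calls to `build_fit_matrix` showing the single-angle and per-axis
# forms of the rotation/scale arguments.
#
#     m = build_fit_matrix(90.0)
#     # m ~ [[0, 1], [-1, 0]]  (a proper 90-degree rotation)
#     m2 = build_fit_matrix((30.0, 45.0), scale=(1.0, 2.0))
#     # independent rotation angles and scales for the X and Y axes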
|
def fit_shifts(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement
transformation only between input lists of positions ``xy`` and ``uv``.
When weights are provided, a weighted fit is performed. Parameter
descriptions and return values are identical to those in `iter_linear_fit`,
except returned ``fit`` dictionary does not contain the following
keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
``'eff_nclip'``.
"""
if xy.size == 0:
raise NotEnoughPointsError(
"At least one point is required to find shifts."
)
diff_pts = np.subtract(xy, uv, dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
meanx = diff_pts[:, 0].mean(dtype=np.longdouble)
meany = diff_pts[:, 1].mean(dtype=np.longdouble)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if not np.sum(w > 0, dtype=int):
raise ValueError("Not enough valid data for 'shift' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
meanx = np.dot(w, diff_pts[:, 0])
meany = np.dot(w, diff_pts[:, 1])
p = np.array([1.0, 0.0, meanx], dtype=np.longdouble)
q = np.array([0.0, 1.0, meany], dtype=np.longdouble)
fit = _build_fit(p, q, 'shift')
resids = diff_pts - fit['shift']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
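# --- Illustrative usage sketch (not part of the original module) ---
# A made-up example of `fit_shifts` with a constant displacement between the
# two catalogs; the fitted matrix stays the identity for this fit geometry.
#
#     import numpy as np
#     uv = np.array([[0.0, 0.0], [5.0, 5.0], [10.0, 0.0]])
#     xy = uv + [0.5, -0.25]
#     fit = fit_shifts(xy, uv)
#     # fit['shift'] ~ [0.5, -0.25] and fit['rmse'] ~ 0.0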
| 356 | 412 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module that provides algorithms for performing linear fit between
sets of 2D points.
:Authors: Mihai Cara, Warren Hack
:License: :doc:`../LICENSE`
"""
import logging
import numbers
import numpy as np
from .linalg import inv
from . import __version__ # noqa: F401
__author__ = 'Mihai Cara, Warren Hack'
__all__ = ['iter_linear_fit', 'build_fit_matrix']
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class SingularMatrixError(Exception):
""" An error class used to report when a singular matrix is encountered."""
pass
class NotEnoughPointsError(Exception):
"""
An error class used to report when there are not enough points to
find parameters of a linear transformation.
"""
pass
def iter_linear_fit(xy, uv, wxy=None, wuv=None,
fitgeom='general', center=None,
nclip=3, sigma=(3.0, 'rmse'), clip_accum=False):
r"""
Compute linear transformation parameters that "best" (in the sense of
minimizing residuals) transform ``uv`` source positions to ``xy``
source positions, iteratively applying sigma-clipping.
More precisely, this function attempts to find a ``2x2`` matrix ``F`` and
a shift vector ``s`` that minimize the residuals between the *transformed*
reference source coordinates ``uv``
.. math::
\mathbf{xy}'_k = \mathbf{F}\cdot(\mathbf{uv}_k-\mathbf{c})+\
\mathbf{s} + \mathbf{c}
:label: ilf1
and the "observed" source positions ``xy``:
.. math::
\epsilon^2 = \Sigma_k w_k \|\mathbf{xy}_k-\mathbf{xy}'_k\|^2.
:label: ilf2
In the above equations, :math:`\mathbf{F}` is a ``2x2`` matrix while
:math:`\mathbf{xy}_k` and :math:`\mathbf{uv}_k` are the position
coordinates of the ``k``-th source (row in input ``xy`` and ``uv`` arrays).
One of the two catalogs (``xy`` or ``uv``) contains what we refer to as
"image" source positions and the other one as "reference" source positions.
The meaning assigned to the ``xy`` and ``uv`` parameters is up to the
caller of this function.
Parameters
----------
xy: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line).
uv: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line). This array *must have* the same length (shape)
as the ``xy`` array.
wxy: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as the ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will contribute equally to the fit if ``wuv`` is also set to `None`.
See ``Notes`` section for more details.
wuv: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as the ``uv`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will contribute equally to the fit if ``wxy`` is also set to `None`.
See ``Notes`` section for more details.
fitgeom: {'shift', 'rscale', 'general'}, optional
The fitting geometry to be used in fitting the matched object lists.
This parameter is used in fitting the shifts (offsets), rotations
and/or scale changes from the matched object lists. The 'general'
fit geometry allows for independent scale and rotation for each axis.
center: tuple, list, numpy.ndarray, None, optional
A list-like container with two ``X``- and ``Y``-positions of the center
(origin) of rotations in the ``uv`` and ``xy`` coordinate frames.
If not provided, ``center`` is estimated as a (weighted) mean position
in the ``uv`` frame.
nclip: int, None, optional
Number (a non-negative integer) of clipping iterations in fit.
Clipping will be turned off if ``nclip`` is either `None` or 0.
sigma: float, tuple of the form (float, str), optional
When a tuple is provided, first value (a positive number)
indicates the number of "fit error estimates" to use for clipping.
The second value (a string) indicates the statistic to be
used for "fit error estimate". Currently the following values are
supported: ``'rmse'``, ``'mae'``, and ``'std'``
- see ``Notes`` section for more details.
When ``sigma`` is a single number, it must be a positive number and
the default error estimate ``'rmse'`` is assumed.
This parameter is ignored when ``nclip`` is either `None` or 0.
clip_accum: bool, optional
Indicates whether or not to reset the list of "bad" (clipped out)
sources after each clipping iteration. When set to `True` the list
only grows with each iteration as "bad" positions never re-enter the
pool of available positions for the fit. By default the list of
"bad" source positions is purged at each iteration.
Returns
-------
fit: dict
- ``'shift'``: A ``numpy.ndarray`` with two components of the
computed shift.
- ``'shift_ld'``: A ``numpy.ndarray`` with two components of the
computed shift of type ``numpy.longdouble``.
- ``'matrix'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix.
- ``'matrix_ld'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix of type ``numpy.longdouble``.
- ``'proper_rot'``: Rotation angle (degree) as if the rotation is
proper.
- ``'rot'``: A tuple of ``(rotx, roty)`` - the rotation angles with
regard to the ``X`` and ``Y`` axes.
- ``'<rot>'``: *Arithmetic mean* of the angles of rotation around
``X`` and ``Y`` axes.
- ``'scale'``: A tuple of ``(sx, sy)`` - scale change in the direction
of the ``X`` and ``Y`` axes.
- ``'<scale>'``: *Geometric mean* of scales ``sx`` and ``sy``.
- ``'skew'``: Computed skew.
- ``'proper'``: a boolean indicating whether the rotation is proper.
- ``'fitgeom'``: Fit geometry (allowed transformations) used for
fitting data (to minimize residuals). This is a copy of the input
argument ``fitgeom``.
- ``'center'``: Center of rotation
- ``'center_ld'``: Center of rotation as a ``numpy.longdouble``.
- ``'fitmask'``: A boolean array indicating which source positions
were used for fitting (`True`) and which were clipped out
(`False`). **NOTE** For weighted fits, positions with zero
weights are automatically excluded from the fits.
- ``'eff_nclip'``: Effective number of clipping iterations
- ``'rmse'``: Root-Mean-Square Error
- ``'mae'``: Mean Absolute Error
- ``'std'``: Standard Deviation of the residuals
- ``'resids'``: An array of residuals of the fit.
**NOTE:** Only the residuals for the "valid" points are reported
here. Therefore the length of this array may be smaller than the
length of input arrays of positions.
Notes
-----
**Weights**
Weights can be provided for both "image" source positions and "reference"
source positions. When no weights are given, all positions are weighted
equally. When only one set of positions has weights (i.e., either ``wxy``
or ``wuv`` is not `None`) then weights in :eq:`ilf2` are set to be equal
to the provided set of weights. When weights for *both* "image" source
positions and "reference" source positions are provided, then the
combined weight that is used in :eq:`ilf2` is computed as:
.. math::
1/w = 1/w_{xy} + 1/w_{uv}.
**Statistics for clipping**
Several statistics are available for clipping iterations and all of them
are reported in the returned ``fit`` dictionary regardless of the
setting in ``sigma``:
.. math::
\mathrm{RMSE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|^2}
.. math::
\mathrm{MAE} = \Sigma_k w_k \|\mathbf{r}_k\|
.. math::
\mathrm{STD} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k - \
\mathbf{\overline{r}}\|^2/(1-V_2)}
where :math:`\mathbf{r}_k=\mathbf{xy}_k-\mathbf{xy}'_k`,
:math:`\Sigma_k w_k = 1`, and :math:`V_2=\Sigma_k w_k^2`.
"""
if fitgeom == 'general':
linear_fit = fit_general
elif fitgeom == 'rscale':
linear_fit = fit_rscale
elif fitgeom == 'shift':
linear_fit = fit_shifts
else:
raise ValueError("Unsupported 'fitgeom' value: '{}'".format(fitgeom))
minobj_per_fitgeom = {'shift': 1, 'rscale': 2, 'general': 3}
minobj = minobj_per_fitgeom[fitgeom]
xy = np.array(xy, dtype=np.longdouble)
uv = np.array(uv, dtype=np.longdouble)
if len(xy.shape) != 2 or xy.shape[1] != 2 or uv.shape != xy.shape:
raise ValueError("Input coordinate arrays 'xy' and 'uv' must be of "
"shape (N, 2) where N is the number of coordinate "
"points.")
wmask = np.ones(len(xy), dtype=np.bool_)
if wxy is not None:
wxy = np.asarray(wxy)
if len(wxy.shape) != 1 or wxy.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wxy' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wxy > 0.0
if wuv is not None:
wuv = np.asarray(wuv)
if len(wuv.shape) != 1 or wuv.shape[0] != xy.shape[0]:
raise ValueError("Weights 'wuv' must be a 1-dimensional vector "
"of lengths equal to the number of input points.")
wmask *= wuv > 0.0
mask = wmask
if sigma is None and nclip is not None and nclip > 0:
raise ValueError("Argument 'sigma' cannot be None when 'nclip' is "
"a positive number.")
if isinstance(sigma, numbers.Number):
sigstat = 'rmse' # default value
nsigma = float(sigma)
elif sigma is not None:
nsigma = float(sigma[0])
sigstat = sigma[1]
if sigstat not in ['rmse', 'mae', 'std']:
raise ValueError("Unsupported sigma statistics value.")
if sigma is not None and nsigma <= 0.0:
raise ValueError("The value of sigma for clipping iterations must be "
"positive.")
if nclip is None:
nclip = 0
else:
if nclip < 0:
raise ValueError("Argument 'nclip' must be non-negative.")
nclip = int(nclip)
if np.count_nonzero(mask) == minobj:
log.warning("The number of sources for the fit is smaller than the "
"minimum number of sources necessary for the requested "
"'fitgeom'.")
log.warning("Resetting number of clipping iterations to 0.")
nclip = 0
if center is None:
center_ld = uv[mask].mean(axis=0, dtype=np.longdouble)
center = center_ld.astype(np.double)
else:
center_ld = np.longdouble(center)
xy[mask] -= center_ld
uv[mask] -= center_ld
log.info("Performing '{:s}' fit".format(fitgeom))
# initial fit:
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
# clipping iterations:
effective_nclip = 0
for n in range(nclip):
resids = fit['resids']
# redefine what pixels will be included in next iteration
cutoff = nsigma * fit[sigstat]
nonclipped = np.linalg.norm(resids, axis=1) < cutoff
if np.count_nonzero(nonclipped) < minobj or nonclipped.all():
break
effective_nclip += 1
prev_mask = mask
if not clip_accum:
mask = np.array(wmask)
mask[prev_mask] *= nonclipped
wmxy = None if wxy is None else wxy[mask]
wmuv = None if wuv is None else wuv[mask]
fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)
fit['center'] = center
fit['center_ld'] = center_ld
fit['fitmask'] = mask
fit['eff_nclip'] = effective_nclip
return fit
def _compute_stat(fit, residuals, weights):
if weights is None:
fit['rmse'] = float(np.sqrt(np.mean(2 * residuals**2)))
fit['mae'] = float(np.mean(np.linalg.norm(residuals, axis=1)))
fit['std'] = float(np.linalg.norm(residuals.std(axis=0)))
else:
# assume all weights > 0 (this should be ensured by the caller => no
# need to repeat the check here)
npts = len(weights)
wt = np.sum(weights)
if npts == 0 or wt == 0.0:
fit['rmse'] = float('nan')
fit['mae'] = float('nan')
fit['std'] = float('nan')
return
w = weights / wt
fit['rmse'] = float(np.sqrt(np.sum(np.dot(w, residuals**2))))
fit['mae'] = float(np.dot(w, np.linalg.norm(residuals, axis=1)))
if npts == 1:
fit['std'] = 0.0
else:
# see:
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
wmean = np.dot(w, residuals)
fit['std'] = float(
np.sqrt(np.sum(np.dot(w, (residuals - wmean)**2) /
(1.0 - np.sum(w**2))))
)
def fit_shifts(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement
transformation only between input lists of positions ``xy`` and ``uv``.
When weights are provided, a weighted fit is performed. Parameter
descriptions and return values are identical to those in `iter_linear_fit`,
except returned ``fit`` dictionary does not contain the following
keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
``'eff_nclip'``.
"""
if xy.size == 0:
raise NotEnoughPointsError(
"At least one point is required to find shifts."
)
diff_pts = np.subtract(xy, uv, dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
meanx = diff_pts[:, 0].mean(dtype=np.longdouble)
meany = diff_pts[:, 1].mean(dtype=np.longdouble)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if not np.sum(w > 0, dtype=int):
raise ValueError("Not enough valid data for 'shift' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
meanx = np.dot(w, diff_pts[:, 0])
meany = np.dot(w, diff_pts[:, 1])
p = np.array([1.0, 0.0, meanx], dtype=np.longdouble)
q = np.array([0.0, 1.0, meany], dtype=np.longdouble)
fit = _build_fit(p, q, 'shift')
resids = diff_pts - fit['shift']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
# Implementation of geomap 'rscale' fitting based on 'lib/geofit.x'
# by Warren Hack. Support for axis flips added by Mihai Cara.
def fit_rscale(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation and scale transformations between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 2:
raise NotEnoughPointsError(
"At least two points are required to find shifts, rotation, and "
"scale."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
xm = np.mean(x)
ym = np.mean(y)
um = np.mean(u)
vm = np.mean(v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(u, u)
sv2 = np.dot(v, v)
sxv = np.dot(x, v)
syu = np.dot(y, u)
sxu = np.dot(x, u)
syv = np.dot(y, v)
su2v2 = su2 + sv2
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 2:
raise ValueError("Not enough valid data for 'rscale' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
xm = np.dot(w, x)
ym = np.dot(w, y)
um = np.dot(w, u)
vm = np.dot(w, v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(w, u**2)
sv2 = np.dot(w, v**2)
sxv = np.dot(w, x * v)
syu = np.dot(w, y * u)
sxu = np.dot(w, x * u)
syv = np.dot(w, y * v)
su2v2 = su2 + sv2
det = sxu * syv - sxv * syu
if det < 0:
rot_num = sxv + syu
rot_denom = sxu - syv
else:
rot_num = sxv - syu
rot_denom = sxu + syv
if rot_num == rot_denom:
theta = 0.0
else:
theta = np.rad2deg(np.arctan2(rot_num, rot_denom))
if theta < 0:
theta += 360.0
ctheta = np.cos(np.deg2rad(theta))
stheta = np.sin(np.deg2rad(theta))
s_num = rot_denom * ctheta + rot_num * stheta
if su2v2 > 0.0:
mag = s_num / su2v2
else:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
if det < 0:
# "flip" y-axis (reflection about x-axis *after* rotation)
# NOTE: keep in mind that 'matrix' is the transposed rotation matrix.
sthetax = -mag * stheta
cthetay = -mag * ctheta
else:
sthetax = mag * stheta
cthetay = mag * ctheta
cthetax = mag * ctheta
sthetay = mag * stheta
sdet = np.sign(det)
xshift = xm - um * cthetax - sdet * vm * sthetax
yshift = ym + sdet * um * sthetay - vm * cthetay
p = np.array([cthetax, sthetay, xshift], dtype=np.longdouble)
q = np.array([-sthetax, cthetay, yshift], dtype=np.longdouble)
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, fitgeom='rscale')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def fit_general(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation, scale, and skew transformations (i.e., the full ``2x2``
transformation matrix) between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 3:
raise NotEnoughPointsError(
"At least three points are required to find 6-parameter linear "
"affine transformations."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
# Set up products used for computing the fit
sw = float(x.size)
sx = x.sum()
sy = y.sum()
su = u.sum()
sv = v.sum()
sxu = np.dot(x, u)
syu = np.dot(y, u)
sxv = np.dot(x, v)
syv = np.dot(y, v)
suu = np.dot(u, u)
svv = np.dot(v, v)
suv = np.dot(u, v)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 3:
raise ValueError("Not enough valid data for 'general' fit: "
"too many weights are zero!")
# Set up products used for computing the fit
sw = np.sum(w, dtype=np.longdouble)
sx = np.dot(w, x)
sy = np.dot(w, y)
su = np.dot(w, u)
sv = np.dot(w, v)
sxu = np.dot(w, x * u)
syu = np.dot(w, y * u)
sxv = np.dot(w, x * v)
syv = np.dot(w, y * v)
suu = np.dot(w, u * u)
svv = np.dot(w, v * v)
suv = np.dot(w, u * v)
m = np.array([[su, sv, sw], [suu, suv, su], [suv, svv, sv]],
dtype=np.longdouble)
a = np.array([sx, sxu, sxv], dtype=np.longdouble)
b = np.array([sy, syu, syv], dtype=np.longdouble)
try:
inv_m = inv(m)
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
p = np.dot(inv_m, a)
q = np.dot(inv_m, b)
if not (np.all(np.isfinite(p)) and np.all(np.isfinite(q))):
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
) # pragma: no cover
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, 'general')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def _build_fit(p, q, fitgeom):
# Build fit matrix:
fit_matrix = np.vstack((p[:2], q[:2]))
# determinant of the transformation
det = p[0] * q[1] - p[1] * q[0]
sdet = np.sign(det)
proper = sdet >= 0
# Create a working copy (no reflections) for computing transformation
# parameters (scale, rotation angle, skew):
wfit = fit_matrix.copy()
# Skew is zero for all fitgeom except 'general':
skew = 0.0
if fitgeom == 'shift':
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': 0.0,
'rot': (0.0, 0.0),
'<rot>': 0.0,
'scale': (1.0, 1.0),
'<scale>': 1.0,
'skew': 0.0,
'proper': proper,
'fitgeom': 'shift'
}
return fit
# Compute average scale:
s = np.sqrt(np.abs(det))
# Compute scales for each axis:
if fitgeom == 'general':
sx, sy = np.sqrt(p[:2]**2 + q[:2]**2)
else:
sx = s
sy = s
# Remove scale from the transformation matrix:
wfit[:, 0] /= sx
wfit[:, 1] /= sy
# Compute rotation angle as if we have a proper rotation.
# This will also act as *some sort* of "average rotation" even for
# transformations with different rot_x and rot_y:
prop_rot = np.rad2deg(
np.arctan2(wfit[0, 1] - sdet * wfit[1, 0],
wfit[0, 0] + sdet * wfit[1, 1])
)
if proper and fitgeom == 'rscale':
rotx = prop_rot
roty = prop_rot
rot = prop_rot
else:
rotx = np.rad2deg(np.arctan2(-wfit[1, 0], wfit[0, 0]))
roty = np.rad2deg(np.arctan2(wfit[0, 1], wfit[1, 1]))
rot = 0.5 * (rotx + roty)
skew = np.mod(roty - rotx - 180.0, 360.0) - 180.0
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': float(prop_rot),
'rot': (float(rotx), float(roty)),
'<rot>': float(rot),
'scale': (float(sx), float(sy)),
'<scale>': float(s),
'skew': float(skew),
'proper': proper,
'fitgeom': fitgeom
}
return fit
def build_fit_matrix(rot, scale=1):
r"""
Create an affine transformation matrix (2x2) from the provided rotation
angle(s) and scale(s):
.. math::
M = \begin{bmatrix}
s_x \cos(\theta_x) & s_y \sin(\theta_y) \\
-s_x \sin(\theta_x) & s_y \cos(\theta_y)
\end{bmatrix}
Parameters
----------
rot: tuple, float, optional
Rotation angle in degrees. Two values (one for each axis) can be
provided as a tuple.
scale: tuple, float, optional
Scale of the linear transformation. Two values (one for each axis)
can be provided as a tuple.
Returns
-------
matrix: numpy.ndarray
A 2x2 `numpy.ndarray` containing coefficients of a linear
transformation.
"""
if hasattr(rot, '__iter__'):
rx, ry = map(np.deg2rad, rot)
else:
rx = ry = np.deg2rad(float(rot))
if hasattr(scale, '__iter__'):
sx, sy = scale
else:
sx = sy = float(scale)
matrix = np.array([[sx * np.cos(rx), sy * np.sin(ry)],
[-sx * np.sin(rx), sy * np.cos(ry)]])
return matrix
|
_encode_files
|
Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
|
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
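# --- Illustrative sketch (not part of the original module) ---
# A made-up example of what `path_url` returns for a prepared request whose
# URL (hypothetical) carries a query string.
#
#     p = PreparedRequest()
#     p.url = 'https://example.com/search?q=tweak'
#     p.path_url   # -> '/search?q=tweak'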
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
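# --- Illustrative sketch (not part of the original module) ---
# Made-up inputs showing the behaviour documented above: strings and
# file-like objects pass through unchanged, while dicts and lists of tuples
# are form-encoded (order is only preserved for lists of tuples).
#
#     RequestEncodingMixin._encode_params([('a', '1'), ('b', ['2', '3'])])
#     # -> 'a=1&b=2&b=3'
#     RequestEncodingMixin._encode_params('already=encoded')
#     # -> 'already=encoded'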
# MASKED: _encode_files function (lines 114-176)
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
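# --- Illustrative sketch (not part of the original module) ---
# A made-up example of `prepare_url` merging extra parameters into a
# hypothetical URL's query string.
#
#     p = PreparedRequest()
#     p.prepare_url('http://example.com/path', params={'q': 'tweak'})
#     # p.url == 'http://example.com/path?q=tweak'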
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
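# --- Illustrative sketch (not part of the original module) ---
# Made-up calls showing how `prepare_body` picks the content type: form data
# when a dict is passed as `data`, JSON when only `json` is given.
#
#     p = PreparedRequest()
#     p.headers = CaseInsensitiveDict()
#     p.prepare_body(data={'a': '1'}, files=None)
#     # p.body == 'a=1'; p.headers['Content-Type'] == 'application/x-www-form-urlencoded'
#
#     q = PreparedRequest()
#     q.headers = CaseInsensitiveDict()
#     q.prepare_body(data=None, files=None, json={'a': 1})
#     # q.headers['Content-Type'] == 'application/json'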
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
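# A minimal usage sketch of the ok property and raise_for_status() defined in
# the Response class above, assuming the standard requests-style top-level API;
# the URL is a placeholder.
import requests

resp = requests.get('https://example.com/api/items')
if resp.ok:
    # ok is True whenever status_code < 400
    data = resp.json()
else:
    # raise_for_status() raises HTTPError carrying the status code, reason and URL
    resp.raise_for_status()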
|
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
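# A minimal sketch of the multipart encoding that _encode_files above delegates
# to, using a standalone urllib3 install rather than the vendored
# oci._vendor.urllib3 path; field names and file contents are placeholder values.
from urllib3.fields import RequestField
from urllib3.filepost import encode_multipart_formdata

fields = []

# A plain form field, as produced from the `data` argument.
fields.append(('comment', 'hello'))

# A file field: the (filename, fileobj, content_type) 3-tuple shape described in
# the docstring becomes a RequestField marked as a multipart part.
rf = RequestField(name='report', data=b'col1,col2\n1,2\n', filename='report.csv')
rf.make_multipart(content_type='text/csv')
fields.append(rf)

body, content_type = encode_multipart_formdata(fields)
# body is the bytes payload; content_type is 'multipart/form-data; boundary=...'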
| 114 | 176 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
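# A minimal sketch of the context-manager behaviour given by __enter__/__exit__
# above: leaving the with-block calls close(), which hands the connection back to
# urllib3. Assumes the standard requests-style API; the URL is a placeholder.
import requests

with requests.get('https://example.com/big-file', stream=True) as resp:
    first_chunk = next(resp.iter_content(chunk_size=8192), b'')
# close() has run here, so resp.raw must not be read again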
|
iter_content
|
Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
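A minimal usage sketch of the streaming behaviour described above, assuming the
standard requests-style top-level API; the URL and output filename are
placeholders.

import requests

# stream=True defers the body download so iter_content can yield it in chunks
resp = requests.get('https://example.com/large.bin', stream=True)
resp.raise_for_status()

with open('large.bin', 'wb') as fh:
    for chunk in resp.iter_content(chunk_size=8192):
        # chunks may differ in size from chunk_size once content decoding applies
        fh.write(chunk)
resp.close()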
|
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
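# A minimal sketch of the hook machinery provided by RequestHooksMixin above,
# using the requests-style `hooks` argument; the callback name and URL are
# placeholders, and 'response' is the only event registered by default_hooks().
import requests

def log_response(response, *args, **kwargs):
    # response hooks receive the finished Response; returning None leaves it unchanged
    print(response.status_code, response.url)

requests.get('https://example.com/', hooks={'response': log_response})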
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
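# --- Illustrative sketch, not part of the module: `ok` is simply
# "raise_for_status() did not raise", so 4xx/5xx responses are falsy. The
# Response is built by hand instead of coming from a live server.
import requests

def _ok_example():
    r = requests.models.Response()
    r.status_code = 404
    assert not r.ok and not bool(r)
    r.status_code = 204
    assert r.ok and bool(r)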
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
# MASKED: iter_content function (lines 737-790)
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
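# --- Illustrative sketch, not part of the module: iter_lines() re-chunks the
# byte stream from iter_content() into lines, holding back a trailing partial
# line until more data arrives. The response is faked with io.BytesIO so no
# network is needed; attribute pokes are for illustration only.
import io
import requests

def _iter_lines_example():
    r = requests.models.Response()
    r.status_code = 200
    r.raw = io.BytesIO(b'alpha\nbeta\ngam')   # last line has no trailing newline
    assert list(r.iter_lines(chunk_size=4)) == [b'alpha', b'beta', b'gam']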
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
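# --- Illustrative sketch, not part of the module: when no charset was declared,
# callers who know the real encoding can set it before reading `text` instead of
# relying on chardet. Private attributes are poked purely for illustration.
import requests

def _text_encoding_example():
    r = requests.models.Response()
    r._content = 'café'.encode('latin-1')
    r._content_consumed = True
    r.encoding = 'latin-1'          # override before touching r.text
    assert r.text == 'café'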
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
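# --- Illustrative sketch, not part of the module: json() decodes the raw bytes,
# using the RFC 4627 BOM/pattern heuristic to spot UTF-16/UTF-32 bodies when no
# encoding was set. Private attributes are poked purely for illustration.
import requests

def _json_example():
    r = requests.models.Response()
    r._content = '{"id": 7}'.encode('utf-16')
    r._content_consumed = True
    assert r.json() == {'id': 7}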
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
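# --- Illustrative sketch, not part of the module: the Link header is parsed into
# a dict keyed by each link's "rel" (or its URL when rel is absent). URLs are
# placeholders.
import requests

def _links_example():
    r = requests.models.Response()
    r.headers['Link'] = ('<https://example.invalid/page2>; rel="next", '
                         '<https://example.invalid/page9>; rel="last"')
    assert r.links['next']['url'] == 'https://example.invalid/page2'
    assert r.links['last']['url'] == 'https://example.invalid/page9'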
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
|
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
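# --- Illustrative sketch, not part of the implementation above: the typical
# streaming pattern this method enables, writing a large download to disk
# without holding it in memory. URL and filename are placeholders.
import requests

def _stream_download(url='https://example.invalid/big.bin', path='big.bin'):
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(path, 'wb') as fh:
            for chunk in r.iter_content(chunk_size=64 * 1024):
                if chunk:                    # skip keep-alive chunks
                    fh.write(chunk)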
| 737 | 790 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
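# --- Illustrative sketch, not part of the module: dicts and lists of 2-tuples
# are form-encoded, list values expand into repeated keys, and None values are
# dropped. Assumes insertion-ordered dicts (Python 3.7+).
import requests

def _encode_params_example():
    encode = requests.models.RequestEncodingMixin._encode_params
    assert encode({'q': 'tea'}) == 'q=tea'
    assert encode([('tag', 'a'), ('tag', 'b')]) == 'tag=a&tag=b'
    assert encode({'tag': ['a', 'b']}) == 'tag=a&tag=b'
    assert encode({'skip': None, 'q': 'tea'}) == 'q=tea'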
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
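# --- Illustrative sketch, not part of the module: the tuple shapes `files`
# accepts, all ending up in one multipart/form-data body. Filenames and contents
# are made up for the example.
import requests

def _files_example():
    files = {
        'short': ('a.txt', b'hello'),                              # 2-tuple
        'typed': ('b.json', b'{}', 'application/json'),            # 3-tuple
        'custom': ('c.bin', b'\x00', 'application/octet-stream',   # 4-tuple
                   {'X-Extra': 'yes'}),
    }
    p = requests.Request('POST', 'https://example.invalid/upload',
                         files=files, data={'note': 'hi'}).prepare()
    assert p.headers['Content-Type'].startswith('multipart/form-data; boundary=')
    assert b'name="note"' in p.body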
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
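# --- Illustrative sketch, not part of the module: the Request -> PreparedRequest
# -> Session.send flow that prepare() exists for, mirroring the class docstring.
# The URL is a placeholder and would need a reachable host to actually send.
import requests

def _prepare_and_send(url='https://example.invalid/get'):
    req = requests.Request('GET', url, params={'page': 2}, headers={'X-Trace': 'demo'})
    prepped = req.prepare()          # the exact bytes that would go on the wire
    assert prepped.url.endswith('/get?page=2')
    with requests.Session() as s:
        return s.send(prepped, timeout=10)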
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
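# --- Illustrative sketch, not part of the module: prepare_url merges `params`
# into an existing query string, adds a "/" path to bare hosts, and IDNA-encodes
# non-ASCII hostnames (relying on the `idna` dependency). Hosts are placeholders.
import requests

def _prepare_url_example():
    p = requests.PreparedRequest()
    p.prepare_url('https://example.invalid/search?q=tea', {'page': '2'})
    assert p.url == 'https://example.invalid/search?q=tea&page=2'
    p.prepare_url('https://example.invalid', None)
    assert p.url == 'https://example.invalid/'
    p.prepare_url('https://bücher.example', None)
    assert p.url == 'https://xn--bcher-kva.example/'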
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if the body is a file, file-like object, generator, or iterator.
# If not, run it through the normal encoding process.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
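# --- Illustrative sketch, not part of the module: how the body and Content-Type
# are chosen from `data` vs `json`. URLs are placeholders.
import requests

def _prepare_body_example():
    form = requests.Request('POST', 'https://example.invalid/', data={'a': '1'}).prepare()
    assert form.body == 'a=1'
    assert form.headers['Content-Type'] == 'application/x-www-form-urlencoded'
    js = requests.Request('POST', 'https://example.invalid/', json={'a': 1}).prepare()
    assert js.body == b'{"a": 1}'
    assert js.headers['Content-Type'] == 'application/json'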
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
|
iter_lines
|
Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
|
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if the body is a file, file-like object, generator, or iterator.
# If not, run it through the normal encoding process.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
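# A minimal usage sketch of the `ok` property (hedged: the public `requests`
# package is assumed to behave like this vendored copy; the URL is illustrative).
# `ok`/truthiness only distinguishes "status < 400" from "error", not "200 OK".
import requests

resp = requests.get('https://httpbin.org/status/404')
if not resp.ok:                    # equivalent to `if not resp:` via __bool__
    print('request failed with', resp.status_code)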
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
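# Streaming sketch for iter_content (hypothetical URL): with stream=True the body
# is pulled from the socket in chunks instead of being read at once; chunk_size is
# a size hint in bytes, not a guarantee of each chunk's length.
import requests

with requests.get('https://httpbin.org/bytes/102400', stream=True) as resp:
    with open('payload.bin', 'wb') as fh:
        for chunk in resp.iter_content(chunk_size=8192):
            if chunk:              # skip keep-alive chunks
                fh.write(chunk)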
# MASKED: iter_lines function (lines 792-821)
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try decoding without an explicit encoding.
content = str(self.content, errors='replace')
return content
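# Sketch of steering the .text decoding (URL illustrative): setting r.encoding
# before the first access skips the chardet guess described above.
import requests

resp = requests.get('https://httpbin.org/encoding/utf8')
resp.encoding = 'utf-8'            # known charset, so avoid apparent_encoding
print(resp.text[:80])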
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
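# Sketch of json(): with no declared charset the body is sniffed via guess_json_utf
# and decoded before parsing, otherwise .text is parsed; extra keyword arguments go
# straight to json.loads. The URL is illustrative.
import requests

resp = requests.get('https://httpbin.org/json')
data = resp.json()                       # parsed dict
precise = resp.json(parse_float=str)     # any json.loads keyword is accepted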
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
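# Sketch of the parsed Link header: each entry is keyed by its rel value (or URL
# when rel is absent). The GitHub search API is used only as a familiar paginated
# endpoint; any server emitting a Link header works the same way.
import requests

resp = requests.get('https://api.github.com/search/code',
                    params={'q': 'addClass user:mozilla'})
next_url = resp.links.get('next', {}).get('url')   # None when there is no next page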
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
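# Sketch of releasing a streamed connection early (URL illustrative): if the body
# is abandoned before being fully consumed, close() hands the socket back to the
# urllib3 pool exactly as the method above does; the context-manager form of
# requests.get() calls it automatically.
import requests

resp = requests.get('https://httpbin.org/stream/100', stream=True)
first_chunk = next(resp.iter_content(chunk_size=1024))
resp.close()                       # return the connection instead of leaking it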
|
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
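# Usage sketch for iter_lines (hypothetical URL): process a streamed body one line
# at a time without buffering it all; by default chunks are split with splitlines(),
# or pass an explicit delimiter (e.g. b'\n') to split only on that byte sequence.
import requests

with requests.get('https://httpbin.org/stream/20', stream=True) as resp:
    for raw_line in resp.iter_lines(chunk_size=512):
        if raw_line:               # iter_lines can yield empty keep-alive lines
            print(raw_line.decode('utf-8'))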
| 792 | 821 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
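# Sketch of what the internal _encode_params helper (used for params= and form
# data=) produces for common shapes; treat this as illustrative, not public API.
from requests.models import RequestEncodingMixin   # public mirror of this vendored module

RequestEncodingMixin._encode_params({'q': 'hello', 'page': 2})
# -> 'q=hello&page=2'        (dict: order arbitrary)
RequestEncodingMixin._encode_params([('tag', 'a'), ('tag', 'b')])
# -> 'tag=a&tag=b'           (list of 2-tuples: order kept, repeated keys allowed)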
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
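# Sketch of the `files` value shapes _encode_files accepts (all names and payloads
# below are made up); these are the same shapes users pass as files= to requests.
import io
from requests.models import RequestEncodingMixin   # public mirror of this vendored module

files = {
    'plain': io.BytesIO(b'raw bytes'),                               # bare file object
    'named': ('data.csv', b'a,b\n1,2\n'),                            # (filename, content)
    'typed': ('img.png', b'\x89PNG rest-of-image', 'image/png'),     # + content type
    'full': ('doc.bin', b'payload', 'application/octet-stream',
             {'X-Extra': '1'}),                                      # + custom headers
}
body, content_type = RequestEncodingMixin._encode_files(files, data={'note': 'hi'})
# content_type -> 'multipart/form-data; boundary=...'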
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
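# Isolated sketch of prepare_url (hypothetical host): a non-ASCII hostname is
# IDNA-encoded and params are merged into any query string already on the URL.
from requests import PreparedRequest   # public mirror of this vendored class

p = PreparedRequest()
p.prepare_url(u'https://b\u00fccher.example/path?a=1', params={'b': 2})
print(p.url)   # https://xn--bcher-kva.example/path?a=1&b=2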
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if the body is a file, file-like object, generator, or iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
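# Sketch of how prepare_body chooses the body and Content-Type: dict/list data is
# form-encoded, json= is serialized to UTF-8 JSON, and files= switches to
# multipart (see _encode_files above). Built on the public PreparedRequest class.
from requests import PreparedRequest

form = PreparedRequest()
form.prepare_method('POST')
form.prepare_headers(None)
form.prepare_body(data={'a': '1'}, files=None)
form.headers['Content-Type']     # 'application/x-www-form-urlencoded'

as_json = PreparedRequest()
as_json.prepare_method('POST')
as_json.prepare_headers(None)
as_json.prepare_body(data=None, files=None, json={'a': 1})
as_json.headers['Content-Type']  # 'application/json'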
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
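# Sketch of prepare_auth: a (user, password) pair, or credentials embedded in the
# URL as below, ends up as an HTTPBasicAuth hook that sets the Authorization
# header. The credentials here are placeholders.
from requests import PreparedRequest

p = PreparedRequest()
p.prepare_method('GET')
p.prepare_url('https://user:secret@httpbin.org/basic-auth/user/secret', None)
p.prepare_headers(None)
p.prepare_auth(None)             # picks the credentials out of the URL
p.headers['Authorization']       # 'Basic dXNlcjpzZWNyZXQ='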
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try decoding without an explicit encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
|
json
|
Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
|
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
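# Sketch of the hook machinery these mixin methods back: 'response' is the event
# requests dispatches after a response arrives; the callback and URL are illustrative.
import requests

def log_status(response, *args, **kwargs):
    print(response.status_code, response.url)

requests.get('https://httpbin.org/get', hooks={'response': log_status})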
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if the body is a file, file-like object, generator, or iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fall back
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
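# --- Illustrative usage sketch (not part of the vendored requests module) ---
# Typical streaming use of iter_content: with stream=True the body is pulled from the socket in
# fixed-size chunks instead of being buffered whole. URL and filename are placeholders.
import requests
with requests.get('https://example.com/large-file.bin', stream=True) as resp:
    resp.raise_for_status()
    with open('large-file.bin', 'wb') as fh:
        for chunk in resp.iter_content(chunk_size=8192):
            if chunk:                    # skip keep-alive chunks
                fh.write(chunk)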
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
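# --- Illustrative usage sketch (not part of the vendored requests module) ---
# iter_lines is the usual way to consume line-delimited streaming endpoints. As the docstring
# warns it is not reentrant safe, so keep a single consumer per response. Placeholder URL.
import requests
resp = requests.get('https://example.com/stream', stream=True)
for line in resp.iter_lines():
    if line:                             # filter out keep-alive blank lines
        print(line.decode('utf-8'))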
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
            # So we try decoding blindly.
content = str(self.content, errors='replace')
return content
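# --- Illustrative usage sketch (not part of the vendored requests module) ---
# Because .text trusts self.encoding and only falls back to chardet when it is None, setting
# r.encoding before the first access is the supported way to apply out-of-band knowledge about
# the charset. Placeholder URL and codec.
import requests
r = requests.get('https://example.com/legacy-page')
r.encoding = 'shift_jis'                 # e.g. known from the page itself, not the HTTP headers
body = r.text                            # decoded with the override; undecodable bytes replaced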
# MASKED: json function (lines 881-905)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
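# --- Illustrative usage sketch (not part of the vendored requests module) ---
# .links parses an RFC 5988 Link header into a dict keyed by the rel value, which makes walking
# a paginated API straightforward. GitHub is just a well-known API that paginates this way, and
# handle_page is a hypothetical callback, not part of requests.
import requests
def handle_page(items):                  # placeholder for real per-page handling
    print(len(items), 'issues on this page')
url = 'https://api.github.com/repos/psf/requests/issues?per_page=100'
while url:
    resp = requests.get(url)
    resp.raise_for_status()
    handle_page(resp.json())
    url = resp.links.get('next', {}).get('url')   # None once there is no rel="next" link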
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
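# --- Illustrative usage sketch (not part of the vendored requests module) ---
# raise_for_status is normally paired with requests.exceptions.HTTPError, which carries the
# failing Response on its .response attribute. Placeholder URL.
import requests
try:
    resp = requests.get('https://example.com/api/items/42')
    resp.raise_for_status()
except requests.exceptions.HTTPError as exc:
    print('request failed:', exc.response.status_code, exc.response.url)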
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
|
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
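# --- Illustrative usage sketch (not part of the vendored requests module) ---
# json() raises ValueError when the body is empty or not valid JSON, so callers typically guard
# it rather than sniffing the Content-Type by hand. Placeholder URL.
import requests
resp = requests.get('https://example.com/api/config')
try:
    data = resp.json()
except ValueError:
    data = None                          # body was empty or not JSON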
| 881 | 905 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
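# --- Illustrative usage sketch (not part of the vendored requests module) ---
# _encode_params accepts strings, file-like objects, dicts and sequences of 2-tuples; a sequence
# of tuples preserves order and allows repeated keys. This mirrors what the public params= and
# data= arguments accept (calling the private staticmethod directly is for illustration only).
from requests.models import RequestEncodingMixin
assert RequestEncodingMixin._encode_params({'a': '1'}) == 'a=1'
assert RequestEncodingMixin._encode_params([('a', '1'), ('a', '2')]) == 'a=1&a=2'
assert RequestEncodingMixin._encode_params('already=encoded') == 'already=encoded'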
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
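# --- Illustrative usage sketch (not part of the vendored requests module) ---
# The tuple shapes described in the docstring map directly onto the public files= argument: a
# 3-tuple adds an explicit content type and a 4-tuple adds per-part headers. URL and file names
# are placeholders; report.csv is assumed to exist on disk.
import requests
files = {
    'report': ('report.csv', open('report.csv', 'rb'), 'text/csv'),
    'meta': ('meta.json', b'{"source": "cron"}', 'application/json', {'X-Part': 'meta'}),
}
resp = requests.post('https://example.com/upload', files=files, data={'job': '42'})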
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
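# --- Illustrative usage sketch (not part of the vendored requests module) ---
# "response" is the only hook event requests dispatches itself; a hook receives the Response
# (plus the send kwargs) and may return a replacement. Placeholder URL.
import requests
def log_status(response, *args, **kwargs):
    print(response.status_code, response.url)
requests.get('https://example.com/', hooks={'response': log_status})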
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
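# --- Illustrative usage sketch (not part of the vendored requests module) ---
# The Request -> PreparedRequest -> Session.send flow from the docstring, handy when the exact
# headers or body need to be inspected or tweaked before anything is transmitted. Placeholder
# URL and header value.
import requests
req = requests.Request('POST', 'https://example.com/api', json={'name': 'demo'})
prepared = req.prepare()
prepared.headers['X-Trace-Id'] = 'abc123'          # mutate the exact bytes that will be sent
with requests.Session() as session:
    resp = session.send(prepared, timeout=10)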
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
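# --- Illustrative usage sketch (not part of the vendored requests module) ---
# prepare_url merges params into any query string already present and IDNA-encodes non-ASCII
# hostnames; both effects can be observed on a bare PreparedRequest without sending anything.
# (The exact punycode form asserted below is an assumption worth double-checking.)
from requests.models import PreparedRequest
p = PreparedRequest()
p.prepare_url('https://example.com/search?q=old', params={'page': '2'})
assert p.url == 'https://example.com/search?q=old&page=2'
p.prepare_url(u'https://bücher.example/katalog', params=None)
assert p.url.startswith('https://xn--bcher-kva.example/')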
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
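# --- Illustrative usage sketch (not part of the vendored requests module) ---
# prepare_body derives the body and Content-Type from whichever of json, data or files was
# supplied; easiest to see on requests prepared by hand. The exact serialized bytes asserted
# below assume the json.dumps defaults used by this version.
import requests
r = requests.Request('POST', 'https://example.com/api', json={'a': 1}).prepare()
assert r.headers['Content-Type'] == 'application/json'
assert r.body == b'{"a": 1}'
r = requests.Request('POST', 'https://example.com/api', data={'a': 1}).prepare()
assert r.headers['Content-Type'] == 'application/x-www-form-urlencoded'
assert r.body == 'a=1'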
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
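# --- Illustrative usage sketch (not part of the vendored requests module) ---
# A 2-tuple passed as auth becomes HTTPBasicAuth, which just injects an Authorization header;
# credentials embedded in the URL are picked up the same way. Placeholder credentials.
import requests
r = requests.Request('GET', 'https://example.com/secure', auth=('alice', 's3cret')).prepare()
assert r.headers['Authorization'].startswith('Basic ')
r = requests.Request('GET', 'https://alice:s3cret@example.com/secure').prepare()
assert r.headers['Authorization'].startswith('Basic ')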
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
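# --- Illustrative usage sketch (not part of the vendored requests module) ---
# As the docstring notes, prepare_cookies is effectively one-shot: the Cookie header is only
# written when it is not already present, so later calls are no-ops unless it is deleted first.
import requests
r = requests.Request('GET', 'https://example.com/', cookies={'session': 'abc'}).prepare()
assert r.headers['Cookie'] == 'session=abc'
r.prepare_cookies({'session': 'xyz'})              # ignored: the header already exists
assert r.headers['Cookie'] == 'session=abc'
del r.headers['Cookie']
r.prepare_cookies({'session': 'xyz'})
assert r.headers['Cookie'] == 'session=xyz'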
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
            # So we try decoding blindly.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
|
close
|
Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
|
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
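# --- Illustrative usage sketch (not part of the vendored requests module) ---
# prepare_headers copies headers into a case-insensitive dict and rejects values containing
# CR/LF (header injection). Placeholder header names and values.
import requests
r = requests.Request('GET', 'https://example.com/', headers={'X-Token': 'abc'}).prepare()
assert r.headers['x-token'] == 'abc'               # lookups are case-insensitive
try:
    requests.Request('GET', 'https://example.com/', headers={'X-Bad': 'a\r\nEvil: 1'}).prepare()
except requests.exceptions.InvalidHeader:
    pass                                            # invalid header values are refused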
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
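# --- Illustrative usage sketch (not part of the vendored requests module) ---
# prepare_content_length leaves bodyless GET/HEAD requests without a Content-Length header but
# pins an explicit Content-Length: 0 on other bodyless methods.
import requests
get_req = requests.Request('GET', 'https://example.com/').prepare()
assert 'Content-Length' not in get_req.headers
post_req = requests.Request('POST', 'https://example.com/').prepare()
assert post_req.headers['Content-Length'] == '0'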
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
# MASKED: close function (lines 950-961)
|
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
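# A brief, hedged sketch (not part of the original class): Response supports the
# context-manager protocol, and __exit__ calls close(). When the body was never
# consumed, close() shuts the raw stream and releases the connection if the raw
# object exposes release_conn(); io.BytesIO below is only a stand-in for a
# urllib3 response, and _close_demo is a hypothetical helper name.
def _close_demo():
    import io
    resp = Response()
    resp.status_code = 200
    resp.raw = io.BytesIO(b'partial body')
    with resp:
        pass                    # body intentionally left unconsumed
    assert resp.raw.closed      # close() shut the underlying stream
    return resp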
| 950 | 961 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
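# A small, hedged sketch (illustrative only, not part of the vendored module):
# _encode_params flattens a dict or list of 2-tuples into an
# application/x-www-form-urlencoded body; repeated keys survive because the
# result is passed to urlencode(..., doseq=True). _encode_params_demo is a
# hypothetical helper name.
def _encode_params_demo():
    encoded = RequestEncodingMixin._encode_params([('q', 'a'), ('q', 'b')])
    assert encoded == 'q=a&q=b'
    return encoded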
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from oci._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
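# A hedged usage sketch (not part of the vendored code): prepare a GET request
# by hand and inspect what the prepare_* steps produced. The httpbin.org URL is
# only a placeholder host, and _prepared_request_demo is a hypothetical name.
def _prepared_request_demo():
    req = Request('get', 'https://httpbin.org/get', params={'k': 'v'})
    p = req.prepare()
    assert p.method == 'GET'                          # prepare_method upper-cases
    assert p.url == 'https://httpbin.org/get?k=v'     # params folded into the query
    assert 'Cookie' not in p.headers                  # no cookies were supplied
    return p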
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
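# A minimal sketch (illustrative only, not part of the vendored module): build a
# Response by hand to show how ok, raise_for_status and json() fit together.
# io.BytesIO stands in for the urllib3 response object, and _response_demo is a
# hypothetical helper name.
def _response_demo():
    import io
    resp = Response()
    resp.status_code = 200
    resp.reason = 'OK'
    resp.url = 'https://example.com/'
    resp.raw = io.BytesIO(b'{"hello": "world"}')
    assert resp.ok                                # 2xx, so raise_for_status is silent
    assert resp.json() == {'hello': 'world'}      # guess_json_utf picks UTF-8
    return resp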
|
initialize
|
Initialize a module.
Args:
module (``torch.nn.Module``): the module will be initialized.
init_cfg (dict | list[dict]): initialization configuration dict to
define initializer. OpenMMLab has implemented 6 initializers
including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
``Kaiming``, and ``Pretrained``.
Example:
>>> module = nn.Linear(2, 3, bias=True)
>>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
>>> initialize(module, init_cfg)
>>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
>>> # define key ``'layer'`` for initializing layer with different
>>> # configuration
>>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
dict(type='Constant', layer='Linear', val=2)]
>>> initialize(module, init_cfg)
>>> # define key``'override'`` to initialize some specific part in
>>> # module
>>> class FooNet(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.feat = nn.Conv2d(3, 16, 3)
>>> self.reg = nn.Conv2d(16, 10, 3)
>>> self.cls = nn.Conv2d(16, 5, 3)
>>> model = FooNet()
>>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
>>> override=dict(type='Constant', name='reg', val=3, bias=4))
>>> initialize(model, init_cfg)
>>> model = ResNet(depth=50)
>>> # Initialize weights with the pretrained model.
>>> init_cfg = dict(type='Pretrained',
checkpoint='torchvision://resnet50')
>>> initialize(model, init_cfg)
>>> # Initialize weights of a sub-module with the specific part of
>>> # a pretrained model by using "prefix".
>>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/' \
>>> 'retinanet_r50_fpn_1x_coco/' \
>>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
>>> init_cfg = dict(type='Pretrained',
checkpoint=url, prefix='backbone.')
|
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: achao
# File Name: weight_init.py
# Description:
"""
import copy
import math
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from deep3dmap.core.utils import Registry, build_from_cfg, get_logger, print_log
INITIALIZERS = Registry('initializer')
def update_init_info(module, init_info):
"""Update the `_params_init_info` in the module if the value of parameters
are changed.
Args:
module (obj:`nn.Module`): The module of PyTorch with a user-defined
attribute `_params_init_info` which records the initialization
information.
init_info (str): The string that describes the initialization.
"""
assert hasattr(
module,
'_params_init_info'), f'Can not find `_params_init_info` in {module}'
for name, param in module.named_parameters():
assert param in module._params_init_info, (
f'Find a new :obj:`Parameter` '
f'named `{name}` during executing the '
f'`init_weights` of '
f'`{module.__class__.__name__}`. '
f'Please do not add or '
f'replace parameters during executing '
f'the `init_weights`. ')
# The parameter has been changed during executing the
# `init_weights` of module
mean_value = param.data.mean()
if module._params_init_info[param]['tmp_mean_value'] != mean_value:
module._params_init_info[param]['init_info'] = init_info
module._params_init_info[param]['tmp_mean_value'] = mean_value
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def normal_init(module, mean=0, std=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
bias: float = 0) -> None:
if hasattr(module, 'weight') and module.weight is not None:
trunc_normal_(module.weight, mean, std, a, b) # type: ignore
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias) # type: ignore
def uniform_init(module, a=0, b=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.uniform_(module.weight, a, b)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module,
a=0,
mode='fan_out',
nonlinearity='relu',
bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.kaiming_uniform_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def caffe2_xavier_init(module, bias=0):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
kaiming_init(
module,
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
bias=bias,
distribution='uniform')
def bias_init_with_prob(prior_prob):
"""initialize conv/fc bias value according to a given probability value."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
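# A minimal check (added sketch, not part of the original module):
# bias_init_with_prob returns the logit of prior_prob, so applying a sigmoid to
# the returned bias recovers the prior. The helper name below is hypothetical.
def _check_bias_init_prior(prior_prob=0.01):
    bias = bias_init_with_prob(prior_prob)        # -log((1 - p) / p) == logit(p)
    recovered = 1.0 / (1.0 + math.exp(-bias))     # sigmoid(bias) == p
    assert abs(recovered - prior_prob) < 1e-9
    return bias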
def _get_bases_name(m):
return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
def __init__(self, *, bias=0, bias_prob=None, layer=None):
self.wholemodule = False
if not isinstance(bias, (int, float)):
raise TypeError(f'bias must be a number, but got a {type(bias)}')
if bias_prob is not None:
if not isinstance(bias_prob, float):
raise TypeError(f'bias_prob type must be float, \
but got {type(bias_prob)}')
if layer is not None:
if not isinstance(layer, (str, list)):
raise TypeError(f'layer must be a str or a list of str, \
but got a {type(layer)}')
else:
layer = []
if bias_prob is not None:
self.bias = bias_init_with_prob(bias_prob)
else:
self.bias = bias
self.layer = [layer] if isinstance(layer, str) else layer
def _get_init_info(self):
info = f'{self.__class__.__name__}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
"""Initialize module parameters with constant values.
Args:
val (int | float): the value to fill the weights in the module with
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, val, **kwargs):
super().__init__(**kwargs)
self.val = val
def __call__(self, module):
def init(m):
if self.wholemodule:
constant_init(m, self.val, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
constant_init(m, self.val, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
return info
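# A minimal sketch (illustrative only, not part of the original module):
# applying ConstantInit to a small module fills the Linear weights with 1 and
# biases with 2 while leaving the Conv1d untouched, because layer='Linear'
# filters by class name. _constant_init_demo is a hypothetical helper name.
def _constant_init_demo():
    m = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
    ConstantInit(val=1, bias=2, layer='Linear')(m)
    assert torch.all(m[1].weight == 1) and torch.all(m[1].bias == 2)
    return m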
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
r"""Initialize module parameters with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks - Glorot, X. & Bengio, Y. (2010).
<http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
Args:
gain (int | float): an optional scaling factor. Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'``
or ``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, gain=1, distribution='normal', **kwargs):
super().__init__(**kwargs)
self.gain = gain
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
xavier_init(m, self.gain, self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
xavier_init(m, self.gain, self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: gain={self.gain}, ' \
f'distribution={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
mean (int | float):the mean of the normal distribution. Defaults to 0.
std (int | float): the standard deviation of the normal distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, mean=0, std=1, **kwargs):
super().__init__(**kwargs)
self.mean = mean
self.std = std
def __call__(self, module):
def init(m):
if self.wholemodule:
normal_init(m, self.mean, self.std, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
normal_init(m, self.mean, self.std, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: mean={self.mean},' \
f' std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]`.
Args:
mean (float): the mean of the normal distribution. Defaults to 0.
std (float): the standard deviation of the normal distribution.
Defaults to 1.
a (float): The minimum cutoff value.
b (float): The maximum cutoff value.
bias (float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
**kwargs) -> None:
super().__init__(**kwargs)
self.mean = mean
self.std = std
self.a = a
self.b = b
def __call__(self, module: nn.Module) -> None:
def init(m):
if self.wholemodule:
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
f' mean={self.mean}, std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
r"""Initialize module parameters with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
a (int | float): the lower bound of the uniform distribution.
Defaults to 0.
b (int | float): the upper bound of the uniform distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self, a=0, b=1, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
def __call__(self, module):
def init(m):
if self.wholemodule:
uniform_init(m, self.a, self.b, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
uniform_init(m, self.a, self.b, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a},' \
f' b={self.b}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
r"""Initialize module parameters with the values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification - He, K. et al. (2015).
<https://www.cv-foundation.org/openaccess/content_iccv_2015/
papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
Args:
a (int | float): the negative slope of the rectifier used after this
layer (only used with ``'leaky_relu'``). Defaults to 0.
mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
``'fan_in'`` preserves the magnitude of the variance of the weights
in the forward pass. Choosing ``'fan_out'`` preserves the
magnitudes in the backwards pass. Defaults to ``'fan_out'``.
nonlinearity (str): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'``.
Defaults to 'relu'.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'`` or
``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
"""
def __init__(self,
a=0,
mode='fan_out',
nonlinearity='relu',
distribution='normal',
**kwargs):
super().__init__(**kwargs)
self.a = a
self.mode = mode
self.nonlinearity = nonlinearity
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
f'nonlinearity={self.nonlinearity}, ' \
f'distribution ={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
def __init__(self, **kwargs):
super().__init__(
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
distribution='uniform',
**kwargs)
def __call__(self, module):
super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
"""Initialize module by loading a pretrained model.
Args:
checkpoint (str): the checkpoint file of the pretrained model should
be load.
prefix (str, optional): the prefix of a sub-module in the pretrained
model. it is for loading a part of the pretrained model to
initialize. For example, if we would like to only load the
backbone of a detector model, we can set ``prefix='backbone.'``.
Defaults to None.
map_location (str): map tensors into proper locations.
"""
def __init__(self, checkpoint, prefix=None, map_location=None):
self.checkpoint = checkpoint
self.prefix = prefix
self.map_location = map_location
def __call__(self, module):
from deep3dmap.runners import (_load_checkpoint_with_prefix, load_checkpoint,
load_state_dict)
logger = get_logger('deep3dmap')
if self.prefix is None:
print_log(f'load model from: {self.checkpoint}', logger=logger)
load_checkpoint(
module,
self.checkpoint,
map_location=self.map_location,
strict=False,
logger=logger)
else:
print_log(
f'load {self.prefix} in model from: {self.checkpoint}',
logger=logger)
state_dict = _load_checkpoint_with_prefix(
self.prefix, self.checkpoint, map_location=self.map_location)
load_state_dict(module, state_dict, strict=False, logger=logger)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: load from {self.checkpoint}'
return info
def _initialize(module, cfg, wholemodule=False):
func = build_from_cfg(cfg, INITIALIZERS)
# wholemodule flag is for override mode, there is no layer key in override
# and initializer will give init values for the whole module with the name
# in override.
func.wholemodule = wholemodule
func(module)
def _initialize_override(module, override, cfg):
if not isinstance(override, (dict, list)):
raise TypeError(f'override must be a dict or a list of dict, \
but got {type(override)}')
override = [override] if isinstance(override, dict) else override
for override_ in override:
cp_override = copy.deepcopy(override_)
name = cp_override.pop('name', None)
if name is None:
raise ValueError('`override` must contain the key "name",'
f'but got {cp_override}')
# if override only has name key, it means use args in init_cfg
if not cp_override:
cp_override.update(cfg)
# if override has name key and other args except type key, it will
# raise error
elif 'type' not in cp_override.keys():
raise ValueError(
f'`override` need "type" key, but got {cp_override}')
if hasattr(module, name):
_initialize(getattr(module, name), cp_override, wholemodule=True)
else:
raise RuntimeError(f'module did not have attribute {name}, '
f'but init_cfg is {cp_override}.')
# MASKED: initialize function (lines 556-625)
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
b: float) -> Tensor:
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
# Modified from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [lower, upper], then translate
# to [2lower-1, 2upper-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor: Tensor,
mean: float = 0.,
std: float = 1.,
a: float = -2.,
b: float = 2.) -> Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Modified from
https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
Args:
tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
mean (float): the mean of the normal distribution.
std (float): the standard deviation of the normal distribution.
a (float): the minimum cutoff value.
b (float): the maximum cutoff value.
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
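# A short usage sketch (assumption: illustrative only, not part of the original
# file): fill a weight tensor with truncated-normal values and confirm every
# sample stays inside the [a, b] cutoffs enforced by the final clamp.
# _trunc_normal_demo is a hypothetical helper name.
def _trunc_normal_demo():
    w = torch.empty(256, 256)
    trunc_normal_(w, mean=0.0, std=0.02, a=-0.04, b=0.04)
    assert float(w.min()) >= -0.04 and float(w.max()) <= 0.04
    return w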
|
def initialize(module, init_cfg):
"""Initialize a module.
Args:
module (``torch.nn.Module``): the module will be initialized.
init_cfg (dict | list[dict]): initialization configuration dict to
define initializer. OpenMMLab has implemented 6 initializers
including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
``Kaiming``, and ``Pretrained``.
Example:
>>> module = nn.Linear(2, 3, bias=True)
>>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
>>> initialize(module, init_cfg)
>>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
>>> # define key ``'layer'`` for initializing layer with different
>>> # configuration
>>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
dict(type='Constant', layer='Linear', val=2)]
>>> initialize(module, init_cfg)
>>> # define key``'override'`` to initialize some specific part in
>>> # module
>>> class FooNet(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.feat = nn.Conv2d(3, 16, 3)
>>> self.reg = nn.Conv2d(16, 10, 3)
>>> self.cls = nn.Conv2d(16, 5, 3)
>>> model = FooNet()
>>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
>>> override=dict(type='Constant', name='reg', val=3, bias=4))
>>> initialize(model, init_cfg)
>>> model = ResNet(depth=50)
>>> # Initialize weights with the pretrained model.
>>> init_cfg = dict(type='Pretrained',
checkpoint='torchvision://resnet50')
>>> initialize(model, init_cfg)
>>> # Initialize weights of a sub-module with the specific part of
>>> # a pretrained model by using "prefix".
>>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
>>> 'retinanet_r50_fpn_1x_coco/'\
>>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
>>> init_cfg = dict(type='Pretrained',
checkpoint=url, prefix='backbone.')
"""
if not isinstance(init_cfg, (dict, list)):
raise TypeError(f'init_cfg must be a dict or a list of dict, \
but got {type(init_cfg)}')
if isinstance(init_cfg, dict):
init_cfg = [init_cfg]
for cfg in init_cfg:
# should deeply copy the original config because cfg may be used by
# other modules, e.g., one init_cfg shared by multiple bottleneck
# blocks, the expected cfg will be changed after pop and will change
# the initialization behavior of other modules
cp_cfg = copy.deepcopy(cfg)
override = cp_cfg.pop('override', None)
_initialize(module, cp_cfg)
if override is not None:
cp_cfg.pop('layer', None)
_initialize_override(module, override, cp_cfg)
else:
# All attributes in module have same initialization.
pass
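# A hedged usage sketch (not part of the original file): initialize every
# Conv2d with constants and override the `reg` head, mirroring the docstring
# example above. The tiny model and the _initialize_demo name are hypothetical.
def _initialize_demo():
    model = nn.Sequential()
    model.feat = nn.Conv2d(3, 16, 3)
    model.reg = nn.Conv2d(16, 10, 3)
    init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
                    override=dict(type='Constant', name='reg', val=3, bias=4))
    initialize(model, init_cfg)
    assert torch.all(model.feat.weight == 1) and torch.all(model.reg.weight == 3)
    return model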
| 556 | 625 |
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: achao
# File Name: weight_init.py
# Description:
"""
import copy
import math
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from deep3dmap.core.utils import Registry, build_from_cfg, get_logger, print_log
INITIALIZERS = Registry('initializer')
def update_init_info(module, init_info):
"""Update the `_params_init_info` in the module if the value of parameters
are changed.
Args:
module (obj:`nn.Module`): The module of PyTorch with a user-defined
attribute `_params_init_info` which records the initialization
information.
init_info (str): The string that describes the initialization.
"""
assert hasattr(
module,
'_params_init_info'), f'Can not find `_params_init_info` in {module}'
for name, param in module.named_parameters():
assert param in module._params_init_info, (
f'Find a new :obj:`Parameter` '
f'named `{name}` during executing the '
f'`init_weights` of '
f'`{module.__class__.__name__}`. '
f'Please do not add or '
f'replace parameters during executing '
f'the `init_weights`. ')
# The parameter has been changed during executing the
# `init_weights` of module
mean_value = param.data.mean()
if module._params_init_info[param]['tmp_mean_value'] != mean_value:
module._params_init_info[param]['init_info'] = init_info
module._params_init_info[param]['tmp_mean_value'] = mean_value
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def normal_init(module, mean=0, std=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
bias: float = 0) -> None:
if hasattr(module, 'weight') and module.weight is not None:
trunc_normal_(module.weight, mean, std, a, b) # type: ignore
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias) # type: ignore
def uniform_init(module, a=0, b=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.uniform_(module.weight, a, b)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module,
a=0,
mode='fan_out',
nonlinearity='relu',
bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.kaiming_uniform_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def caffe2_xavier_init(module, bias=0):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
kaiming_init(
module,
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
bias=bias,
distribution='uniform')
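# Hedged illustration (not part of the original file): with a=1, mode='fan_in' and
# nonlinearity='leaky_relu' the Kaiming gain is sqrt(2 / (1 + a**2)) = 1, so the
# helper above samples Uniform(-sqrt(3/fan_in), sqrt(3/fan_in)), which matches
# Caffe2's XavierFill. `_demo_conv` and `_demo_bound` exist only for this sketch.
_demo_conv = nn.Conv2d(16, 8, kernel_size=3)
caffe2_xavier_init(_demo_conv)
_demo_bound = math.sqrt(3.0 / (16 * 3 * 3))  # fan_in = in_channels * kernel_h * kernel_w
assert float(_demo_conv.weight.abs().max()) <= _demo_bound + 1e-6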
def bias_init_with_prob(prior_prob):
"""initialize conv/fc bias value according to a given probability value."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
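# Hedged illustration (not part of the original file): for a focal-loss style prior
# of 0.01 the returned bias makes the initial sigmoid output equal that prior.
_demo_bias = bias_init_with_prob(0.01)  # -log(0.99 / 0.01), roughly -4.595
assert abs(1.0 / (1.0 + np.exp(-_demo_bias)) - 0.01) < 1e-6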
def _get_bases_name(m):
return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
def __init__(self, *, bias=0, bias_prob=None, layer=None):
self.wholemodule = False
if not isinstance(bias, (int, float)):
raise TypeError(f'bias must be a number, but got a {type(bias)}')
if bias_prob is not None:
if not isinstance(bias_prob, float):
raise TypeError(f'bias_prob type must be float, \
but got {type(bias_prob)}')
if layer is not None:
if not isinstance(layer, (str, list)):
raise TypeError(f'layer must be a str or a list of str, \
but got a {type(layer)}')
else:
layer = []
if bias_prob is not None:
self.bias = bias_init_with_prob(bias_prob)
else:
self.bias = bias
self.layer = [layer] if isinstance(layer, str) else layer
def _get_init_info(self):
info = f'{self.__class__.__name__}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
"""Initialize module parameters with constant values.
Args:
val (int | float): the value to fill the weights in the module with
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self, val, **kwargs):
super().__init__(**kwargs)
self.val = val
def __call__(self, module):
def init(m):
if self.wholemodule:
constant_init(m, self.val, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
constant_init(m, self.val, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
r"""Initialize module parameters with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks - Glorot, X. & Bengio, Y. (2010).
<http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
Args:
gain (int | float): an optional scaling factor. Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'``
or ``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self, gain=1, distribution='normal', **kwargs):
super().__init__(**kwargs)
self.gain = gain
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
xavier_init(m, self.gain, self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
xavier_init(m, self.gain, self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: gain={self.gain}, ' \
f'distribution={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
mean (int | float):the mean of the normal distribution. Defaults to 0.
std (int | float): the standard deviation of the normal distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self, mean=0, std=1, **kwargs):
super().__init__(**kwargs)
self.mean = mean
self.std = std
def __call__(self, module):
def init(m):
if self.wholemodule:
normal_init(m, self.mean, self.std, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
normal_init(m, self.mean, self.std, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: mean={self.mean},' \
f' std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
r"""Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]`.
Args:
mean (float): the mean of the normal distribution. Defaults to 0.
std (float): the standard deviation of the normal distribution.
Defaults to 1.
a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
bias (float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self,
mean: float = 0,
std: float = 1,
a: float = -2,
b: float = 2,
**kwargs) -> None:
super().__init__(**kwargs)
self.mean = mean
self.std = std
self.a = a
self.b = b
def __call__(self, module: nn.Module) -> None:
def init(m):
if self.wholemodule:
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
trunc_normal_init(m, self.mean, self.std, self.a, self.b,
self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
f' mean={self.mean}, std={self.std}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
r"""Initialize module parameters with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
a (int | float): the lower bound of the uniform distribution.
Defaults to 0.
b (int | float): the upper bound of the uniform distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self, a=0, b=1, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
def __call__(self, module):
def init(m):
if self.wholemodule:
uniform_init(m, self.a, self.b, self.bias)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
uniform_init(m, self.a, self.b, self.bias)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a},' \
f' b={self.b}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
r"""Initialize module parameters with the values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification - He, K. et al. (2015).
<https://www.cv-foundation.org/openaccess/content_iccv_2015/
papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
Args:
a (int | float): the negative slope of the rectifier used after this
layer (only used with ``'leaky_relu'``). Defaults to 0.
mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
``'fan_in'`` preserves the magnitude of the variance of the weights
in the forward pass. Choosing ``'fan_out'`` preserves the
magnitudes in the backwards pass. Defaults to ``'fan_out'``.
nonlinearity (str): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
Defaults to 'relu'.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'`` or
``'uniform'``. Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer(s) to be initialized.
Defaults to None.
"""
def __init__(self,
a=0,
mode='fan_out',
nonlinearity='relu',
distribution='normal',
**kwargs):
super().__init__(**kwargs)
self.a = a
self.mode = mode
self.nonlinearity = nonlinearity
self.distribution = distribution
def __call__(self, module):
def init(m):
if self.wholemodule:
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
else:
layername = m.__class__.__name__
basesname = _get_bases_name(m)
if len(set(self.layer) & set([layername] + basesname)):
kaiming_init(m, self.a, self.mode, self.nonlinearity,
self.bias, self.distribution)
module.apply(init)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
f'nonlinearity={self.nonlinearity}, ' \
               f'distribution={self.distribution}, bias={self.bias}'
return info
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
# `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
# Acknowledgment to FAIR's internal code
def __init__(self, **kwargs):
super().__init__(
a=1,
mode='fan_in',
nonlinearity='leaky_relu',
distribution='uniform',
**kwargs)
def __call__(self, module):
super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
"""Initialize module by loading a pretrained model.
Args:
        checkpoint (str): the checkpoint file of the pretrained model to be
            loaded.
        prefix (str, optional): the prefix of a sub-module in the pretrained
            model. It is for loading a part of the pretrained model to
initialize. For example, if we would like to only load the
backbone of a detector model, we can set ``prefix='backbone.'``.
Defaults to None.
map_location (str): map tensors into proper locations.
"""
def __init__(self, checkpoint, prefix=None, map_location=None):
self.checkpoint = checkpoint
self.prefix = prefix
self.map_location = map_location
def __call__(self, module):
from deep3dmap.runners import (_load_checkpoint_with_prefix, load_checkpoint,
load_state_dict)
logger = get_logger('deep3dmap')
if self.prefix is None:
print_log(f'load model from: {self.checkpoint}', logger=logger)
load_checkpoint(
module,
self.checkpoint,
map_location=self.map_location,
strict=False,
logger=logger)
else:
print_log(
f'load {self.prefix} in model from: {self.checkpoint}',
logger=logger)
state_dict = _load_checkpoint_with_prefix(
self.prefix, self.checkpoint, map_location=self.map_location)
load_state_dict(module, state_dict, strict=False, logger=logger)
if hasattr(module, '_params_init_info'):
update_init_info(module, init_info=self._get_init_info())
def _get_init_info(self):
info = f'{self.__class__.__name__}: load from {self.checkpoint}'
return info
def _initialize(module, cfg, wholemodule=False):
func = build_from_cfg(cfg, INITIALIZERS)
# wholemodule flag is for override mode, there is no layer key in override
# and initializer will give init values for the whole module with the name
# in override.
func.wholemodule = wholemodule
func(module)
def _initialize_override(module, override, cfg):
if not isinstance(override, (dict, list)):
raise TypeError(f'override must be a dict or a list of dict, \
but got {type(override)}')
override = [override] if isinstance(override, dict) else override
for override_ in override:
cp_override = copy.deepcopy(override_)
name = cp_override.pop('name', None)
if name is None:
            raise ValueError('`override` must contain the key "name", '
                             f'but got {cp_override}')
# if override only has name key, it means use args in init_cfg
if not cp_override:
cp_override.update(cfg)
# if override has name key and other args except type key, it will
# raise error
elif 'type' not in cp_override.keys():
raise ValueError(
                f'`override` needs a "type" key, but got {cp_override}')
if hasattr(module, name):
_initialize(getattr(module, name), cp_override, wholemodule=True)
else:
raise RuntimeError(f'module did not have attribute {name}, '
f'but init_cfg is {cp_override}.')
def initialize(module, init_cfg):
"""Initialize a module.
Args:
        module (``torch.nn.Module``): the module to be initialized.
init_cfg (dict | list[dict]): initialization configuration dict to
define initializer. OpenMMLab has implemented 6 initializers
including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
``Kaiming``, and ``Pretrained``.
Example:
>>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
>>> initialize(module, init_cfg)
>>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
>>> # define key ``'layer'`` for initializing layer with different
>>> # configuration
>>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
dict(type='Constant', layer='Linear', val=2)]
>>> initialize(module, init_cfg)
        >>> # define key ``'override'`` to initialize some specific part in
>>> # module
>>> class FooNet(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.feat = nn.Conv2d(3, 16, 3)
>>> self.reg = nn.Conv2d(16, 10, 3)
>>> self.cls = nn.Conv2d(16, 5, 3)
>>> model = FooNet()
>>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
>>> override=dict(type='Constant', name='reg', val=3, bias=4))
>>> initialize(model, init_cfg)
>>> model = ResNet(depth=50)
>>> # Initialize weights with the pretrained model.
>>> init_cfg = dict(type='Pretrained',
checkpoint='torchvision://resnet50')
>>> initialize(model, init_cfg)
>>> # Initialize weights of a sub-module with the specific part of
>>> # a pretrained model by using "prefix".
>>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
>>> 'retinanet_r50_fpn_1x_coco/'\
>>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
>>> init_cfg = dict(type='Pretrained',
checkpoint=url, prefix='backbone.')
"""
if not isinstance(init_cfg, (dict, list)):
raise TypeError(f'init_cfg must be a dict or a list of dict, \
but got {type(init_cfg)}')
if isinstance(init_cfg, dict):
init_cfg = [init_cfg]
for cfg in init_cfg:
# should deeply copy the original config because cfg may be used by
# other modules, e.g., one init_cfg shared by multiple bottleneck
# blocks, the expected cfg will be changed after pop and will change
# the initialization behavior of other modules
cp_cfg = copy.deepcopy(cfg)
override = cp_cfg.pop('override', None)
_initialize(module, cp_cfg)
if override is not None:
cp_cfg.pop('layer', None)
_initialize_override(module, override, cp_cfg)
else:
# All attributes in module have same initialization.
pass
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
b: float) -> Tensor:
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
# Modified from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [lower, upper], then translate
# to [2lower-1, 2upper-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor: Tensor,
mean: float = 0.,
std: float = 1.,
a: float = -2.,
b: float = 2.) -> Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Modified from
https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
Args:
tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
mean (float): the mean of the normal distribution.
std (float): the standard deviation of the normal distribution.
a (float): the minimum cutoff value.
b (float): the maximum cutoff value.
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
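# Hedged usage sketch (not part of the original file): fill a weight tensor from the
# truncated normal above and confirm every value stays inside the requested cutoffs.
_demo_w = torch.empty(256, 256)
trunc_normal_(_demo_w, mean=0.0, std=0.02, a=-0.04, b=0.04)
assert float(_demo_w.min()) >= -0.04 and float(_demo_w.max()) <= 0.04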
|
patch_settings
|
Merge settings with global cms settings, so all required attributes
will exist. Never override, just append non-existing settings.
Also check for setting inconsistencies if settings.DEBUG is set.
|
# -*- coding: utf-8 -*-
from cms.exceptions import CMSDeprecationWarning
from django.conf import settings
from patch import post_patch, post_patch_check, pre_patch
import warnings
# MASKED: patch_settings function (lines 9-37)
patch_settings.ALREADY_PATCHED = False
|
def patch_settings():
"""Merge settings with global cms settings, so all required attributes
    will exist. Never override, just append non-existing settings.
    Also check for setting inconsistencies if settings.DEBUG is set.
"""
if patch_settings.ALREADY_PATCHED:
return
patch_settings.ALREADY_PATCHED = True
if getattr(settings, 'CMS_MODERATOR', False):
warnings.warn("CMS_MODERATOR will be removed and replaced in django CMS 2.4!", CMSDeprecationWarning)
from cms.conf import global_settings
# patch settings
pre_patch()
# merge with global cms settings
for attr in dir(global_settings):
if attr == attr.upper() and not hasattr(settings, attr):
setattr(settings._wrapped, attr, getattr(global_settings, attr))
post_patch()
if settings.DEBUG:
# check if settings are correct, call this only if debugging is enabled
post_patch_check()
| 9 | 37 |
# -*- coding: utf-8 -*-
from cms.exceptions import CMSDeprecationWarning
from django.conf import settings
from patch import post_patch, post_patch_check, pre_patch
import warnings
def patch_settings():
"""Merge settings with global cms settings, so all required attributes
    will exist. Never override, just append non-existing settings.
    Also check for setting inconsistencies if settings.DEBUG is set.
"""
if patch_settings.ALREADY_PATCHED:
return
patch_settings.ALREADY_PATCHED = True
if getattr(settings, 'CMS_MODERATOR', False):
warnings.warn("CMS_MODERATOR will be removed and replaced in django CMS 2.4!", CMSDeprecationWarning)
from cms.conf import global_settings
# patch settings
pre_patch()
# merge with global cms settings
for attr in dir(global_settings):
if attr == attr.upper() and not hasattr(settings, attr):
setattr(settings._wrapped, attr, getattr(global_settings, attr))
post_patch()
if settings.DEBUG:
# check if settings are correct, call this only if debugging is enabled
post_patch_check()
patch_settings.ALREADY_PATCHED = False
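# Hedged sketch (stand-in objects, not Django settings): the merge loop above only
# copies UPPER_CASE attributes that the target does not already define, so explicit
# user settings always win and missing defaults are merely appended.
class _Defaults(object):
    CMS_PERMISSION = False
    CMS_TEMPLATES = (('base.html', 'Base'),)

class _UserSettings(object):
    CMS_PERMISSION = True  # explicit user override, must be preserved

_user = _UserSettings()
for _attr in dir(_Defaults):
    if _attr == _attr.upper() and not hasattr(_user, _attr):
        setattr(_user, _attr, getattr(_Defaults, _attr))
assert _user.CMS_PERMISSION is True
assert _user.CMS_TEMPLATES == (('base.html', 'Base'),)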
|
list_database_account_keys
|
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
@pulumi.output_type
class ListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
"""
def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
if primary_master_key and not isinstance(primary_master_key, str):
raise TypeError("Expected argument 'primary_master_key' to be a str")
pulumi.set(__self__, "primary_master_key", primary_master_key)
if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
if secondary_master_key and not isinstance(secondary_master_key, str):
raise TypeError("Expected argument 'secondary_master_key' to be a str")
pulumi.set(__self__, "secondary_master_key", secondary_master_key)
if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
@property
@pulumi.getter(name="primaryMasterKey")
def primary_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-write key.
"""
return pulumi.get(self, "primary_master_key")
@property
@pulumi.getter(name="primaryReadonlyMasterKey")
def primary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-only key.
"""
return pulumi.get(self, "primary_readonly_master_key")
@property
@pulumi.getter(name="secondaryMasterKey")
def secondary_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-write key.
"""
return pulumi.get(self, "secondary_master_key")
@property
@pulumi.getter(name="secondaryReadonlyMasterKey")
def secondary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-only key.
"""
return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabaseAccountKeysResult(
primary_master_key=self.primary_master_key,
primary_readonly_master_key=self.primary_readonly_master_key,
secondary_master_key=self.secondary_master_key,
secondary_readonly_master_key=self.secondary_readonly_master_key)
# MASKED: list_database_account_keys function (lines 81-104)
|
def list_database_account_keys(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value
return AwaitableListDatabaseAccountKeysResult(
primary_master_key=__ret__.primary_master_key,
primary_readonly_master_key=__ret__.primary_readonly_master_key,
secondary_master_key=__ret__.secondary_master_key,
secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
| 81 | 104 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
@pulumi.output_type
class ListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
"""
def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
if primary_master_key and not isinstance(primary_master_key, str):
raise TypeError("Expected argument 'primary_master_key' to be a str")
pulumi.set(__self__, "primary_master_key", primary_master_key)
if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
if secondary_master_key and not isinstance(secondary_master_key, str):
raise TypeError("Expected argument 'secondary_master_key' to be a str")
pulumi.set(__self__, "secondary_master_key", secondary_master_key)
if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
@property
@pulumi.getter(name="primaryMasterKey")
def primary_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-write key.
"""
return pulumi.get(self, "primary_master_key")
@property
@pulumi.getter(name="primaryReadonlyMasterKey")
def primary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-only key.
"""
return pulumi.get(self, "primary_readonly_master_key")
@property
@pulumi.getter(name="secondaryMasterKey")
def secondary_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-write key.
"""
return pulumi.get(self, "secondary_master_key")
@property
@pulumi.getter(name="secondaryReadonlyMasterKey")
def secondary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-only key.
"""
return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabaseAccountKeysResult(
primary_master_key=self.primary_master_key,
primary_readonly_master_key=self.primary_readonly_master_key,
secondary_master_key=self.secondary_master_key,
secondary_readonly_master_key=self.secondary_readonly_master_key)
def list_database_account_keys(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value
return AwaitableListDatabaseAccountKeysResult(
primary_master_key=__ret__.primary_master_key,
primary_readonly_master_key=__ret__.primary_readonly_master_key,
secondary_master_key=__ret__.secondary_master_key,
secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
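# Hedged usage sketch (placeholder names, intended to run inside a Pulumi program):
# fetch the account keys and export one of them as a stack output.
_demo_keys = list_database_account_keys(account_name="my-cosmos-account",
                                        resource_group_name="my-resource-group")
pulumi.export("primaryMasterKey", _demo_keys.primary_master_key)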
|
run_recipe
|
Given a recipe, calls the appropriate query and returns the result.
The provided recipe name is used to import and run the corresponding recipe module.
:param str recipe: name of the recipe to be run.
:param list args: remainder arguments that were unparsed.
:param Configuration config: config object.
:returns: string
|
from __future__ import print_function, absolute_import
import importlib
import logging
import os
from argparse import ArgumentParser
from six import string_types
from adr.formatter import all_formatters
from .errors import MissingDataError
log = logging.getLogger('adr')
here = os.path.abspath(os.path.dirname(__file__))
RECIPE_DIR = os.path.join(here, 'recipes')
ARGUMENT_GROUPS = {
'branch': [
[['-B', '--branch'],
{'default': ['mozilla-central'],
'action': 'append',
'help': "Branches to query results from",
}],
],
'build': [
[['-b', '--build-type'],
{'default': 'opt',
'help': "Build type (default: opt)",
}],
],
'date': [
[['--from'],
{'dest': 'from_date',
'default': 'today-week',
'help': "Starting date to pull data from, defaults "
"to a week ago",
}],
[['--to'],
{'dest': 'to_date',
'default': 'eod', # end of day
'help': "Ending date to pull data from, defaults "
"to now",
}],
],
'path': [
[['--path'],
{'required': True,
'help': "Path relative to repository root (file or directory)",
}],
],
'platform': [
[['-p', '--platform'],
{'default': 'windows10-64',
'help': "Platform to limit results to (default: windows10-64)",
}],
],
'rev': [
[['-r', '--revision'],
{'dest': 'rev',
'required': True,
'help': "Revision to limit results to",
}],
],
'test': [
[['-t', '--test'],
{'required': True,
'dest': 'test_name',
'help': "Path to a test file",
}],
],
}
"""
These are commonly used arguments which can be re-used. They are shared to
provide a consistent CLI across recipes.
"""
class RecipeParser(ArgumentParser):
arguments = []
def __init__(self, *groups, **kwargs):
ArgumentParser.__init__(self, **kwargs)
for cli, kwargs in self.arguments:
self.add_argument(*cli, **kwargs)
for name in groups:
group = self.add_argument_group("{} arguments".format(name))
arguments = ARGUMENT_GROUPS[name]
for cli, kwargs in arguments:
group.add_argument(*cli, **kwargs)
# MASKED: run_recipe function (lines 95-116)
|
def run_recipe(recipe, args, config):
"""Given a recipe, calls the appropriate query and returns the result.
    The provided recipe name is used to import and run the corresponding recipe module.
:param str recipe: name of the recipe to be run.
:param list args: remainder arguments that were unparsed.
:param Configuration config: config object.
:returns: string
"""
modname = '.recipes.{}'.format(recipe)
mod = importlib.import_module(modname, package='adr')
try:
output = mod.run(args, config)
except MissingDataError:
return "ActiveData didn\'t return any data."
if isinstance(config.fmt, string_types):
fmt = all_formatters[config.fmt]
log.debug("Result:")
return fmt(output)
| 95 | 116 |
from __future__ import print_function, absolute_import
import importlib
import logging
import os
from argparse import ArgumentParser
from six import string_types
from adr.formatter import all_formatters
from .errors import MissingDataError
log = logging.getLogger('adr')
here = os.path.abspath(os.path.dirname(__file__))
RECIPE_DIR = os.path.join(here, 'recipes')
ARGUMENT_GROUPS = {
'branch': [
[['-B', '--branch'],
{'default': ['mozilla-central'],
'action': 'append',
'help': "Branches to query results from",
}],
],
'build': [
[['-b', '--build-type'],
{'default': 'opt',
'help': "Build type (default: opt)",
}],
],
'date': [
[['--from'],
{'dest': 'from_date',
'default': 'today-week',
'help': "Starting date to pull data from, defaults "
"to a week ago",
}],
[['--to'],
{'dest': 'to_date',
'default': 'eod', # end of day
'help': "Ending date to pull data from, defaults "
"to now",
}],
],
'path': [
[['--path'],
{'required': True,
'help': "Path relative to repository root (file or directory)",
}],
],
'platform': [
[['-p', '--platform'],
{'default': 'windows10-64',
'help': "Platform to limit results to (default: windows10-64)",
}],
],
'rev': [
[['-r', '--revision'],
{'dest': 'rev',
'required': True,
'help': "Revision to limit results to",
}],
],
'test': [
[['-t', '--test'],
{'required': True,
'dest': 'test_name',
'help': "Path to a test file",
}],
],
}
"""
These are commonly used arguments which can be re-used. They are shared to
provide a consistent CLI across recipes.
"""
class RecipeParser(ArgumentParser):
arguments = []
def __init__(self, *groups, **kwargs):
ArgumentParser.__init__(self, **kwargs)
for cli, kwargs in self.arguments:
self.add_argument(*cli, **kwargs)
for name in groups:
group = self.add_argument_group("{} arguments".format(name))
arguments = ARGUMENT_GROUPS[name]
for cli, kwargs in arguments:
group.add_argument(*cli, **kwargs)
def run_recipe(recipe, args, config):
"""Given a recipe, calls the appropriate query and returns the result.
    The provided recipe name is used to import and run the corresponding recipe module.
:param str recipe: name of the recipe to be run.
:param list args: remainder arguments that were unparsed.
:param Configuration config: config object.
:returns: string
"""
modname = '.recipes.{}'.format(recipe)
mod = importlib.import_module(modname, package='adr')
try:
output = mod.run(args, config)
except MissingDataError:
return "ActiveData didn\'t return any data."
if isinstance(config.fmt, string_types):
fmt = all_formatters[config.fmt]
log.debug("Result:")
return fmt(output)
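# Hedged sketch (hypothetical recipe, not part of the original file): a recipe module
# defines its CLI by subclassing RecipeParser with shared argument groups and exposes
# a run(args, config) entry point that run_recipe() imports and calls.
class _BacklogParser(RecipeParser):
    arguments = [
        [['--limit'], {'type': int, 'default': 10, 'help': "Maximum rows to return"}],
    ]

def _example_run(args, config):
    parsed = _BacklogParser('branch', 'date').parse_args(args)
    return {'branches': parsed.branch, 'since': parsed.from_date, 'limit': parsed.limit}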
|
read_args
|
Reads command line arguments.
Returns: Parsed arguments.
|
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
# MASKED: read_args function (lines 10-18)
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
    ax+by+cz = 0.
    Arguments:
    data: A numpy array of points.
    coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
    Projects a list of points onto the plane ax+by+cz=0,
    where a,b,c are elements of coeffs.
    Arguments:
    coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
| 10 | 18 |
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
    ax+by+cz = 0.
    Arguments:
    data: A numpy array of points.
    coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
    Projects a list of points onto the plane ax+by+cz=0,
    where a,b,c are elements of coeffs.
    Arguments:
    coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
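# Hedged sanity check (synthetic data, not part of the original script): points lying
# exactly in the plane z = 0 give ~zero plane_err for the true normal and are left
# unchanged by project_to_plane.
_t = np.linspace(0, 2*np.pi, 50)
_pts = np.column_stack((np.cos(_t), np.sin(_t), np.zeros_like(_t)))
assert plane_err(_pts, [0, 0, 1]) < 1e-12
assert np.allclose(project_to_plane(_pts, [0, 0, 1]), _pts)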
|
conv_to_2D
|
Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].
|
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
    ax+by+cz = 0.
    Arguments:
    data: A numpy array of points.
    coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
    Projects a list of points onto the plane ax+by+cz=0,
    where a,b,c are elements of coeffs.
    Arguments:
    coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
# MASKED: conv_to_2D function (lines 60-79)
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
| 60 | 79 |
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
    ax+by+cz = 0.
    Arguments:
    data: A numpy array of points.
    coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
    Projects a list of points onto the plane ax+by+cz=0,
    where a,b,c are elements of coeffs.
    Arguments:
    coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
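A quick numeric check of project_to_plane and conv_to_2D from the script above (the point and plane below are made up for illustration; the functions themselves come from the listing):
import numpy as np
# project the point (1, 1, 1) onto the plane z = 0 (normal vector [0, 0, 1])
pt = np.array([[1.0, 1.0, 1.0]])
proj = project_to_plane(pt, [0, 0, 1])              # -> [[1. 1. 0.]]
# express the projected point in the in-plane basis x=[1,0,0], y=[0,1,0]
coords = conv_to_2D(proj, [1, 0, 0], [0, 1, 0])     # -> [[1. 1.]]
print(proj, coords)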
cart_to_pol
|
Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].
|
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+cz = 0.
Arguments:
data: A numpy array of points.
coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
Projects a list of points onto the plane ax+by+cz=0,
where a,b,c are elements of coeffs.
Arguments:
coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
# MASKED: cart_to_pol function (lines 81-94)
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
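A small usage sketch for cart_to_pol (the sample points are made up for illustration):
import numpy as np
pts = np.array([[1.0, 0.0], [0.0, 1.0]])   # two points on the unit circle
print(cart_to_pol(pts))
# -> [[1. 0.    ]
#     [1. 1.5708]]   i.e. radius 1 with angles 0 and pi/2 radians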
| 81 | 94 |
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+cz = 0.
Arguments:
data: A numpy array of points.
coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
Projects a list of points onto the plane ax+by+cz=0,
where a,b,c are elements of coeffs.
Arguments:
coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
ellipse_err
|
Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.
|
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+cz = 0.
Arguments:
data: A numpy array of points.
coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
Projects a list of points onto the plane ax+by+cz=0,
where a,b,c are elements of coeffs.
Arguments:
coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
# MASKED: ellipse_err function (lines 96-124)
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
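A quick sanity check of ellipse_err with synthetic data (the values below are made up): points sampled exactly from an ellipse should give zero error for the true parameters and a positive error otherwise.
import numpy as np
a_true, e_true, t0_true = 2.0, 0.5, 0.0
theta = np.array([0.0, np.pi/2, np.pi])
r = a_true*(1 - e_true**2)/(1 + e_true*np.cos(theta - t0_true))
polar = np.column_stack((r, theta))
print(ellipse_err(polar, [a_true, e_true, t0_true]))   # ~0.0 for the true parameters
print(ellipse_err(polar, [a_true, 0.4, t0_true]))      # > 0 for a wrong eccentricity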
| 96 | 124 |
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
'''Reads command line arguments.
Returns: Parsed arguments.'''
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv')
parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km')
return parser.parse_args()
def plane_err(data,coeffs):
'''Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+cz = 0.
Arguments:
data: A numpy array of points.
coeffs: The coefficients of the plane ax+by+cz=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.'''
a,b,c = coeffs
return np.sum((a*data[:,0]+b*data[:,1]+c*data[:,2])**2)/(a**2+b**2+c**2)
def project_to_plane(points,coeffs):
'''Projects points onto a plane.
Projects a list of points onto the plane ax+by+cz=0,
where a,b,c are elements of coeffs.
Arguments:
coeffs: The coefficients of the plane ax+by+cz=0.
points: A numpy array of points.
Returns:
A list of projected points.'''
a,b,c = coeffs
proj_mat = [[b**2+c**2, -a*b , -a*c ],
[ -a*b ,a**2+c**2, -b*c ],
[ -a*c , -b*c ,a**2+b**2]]
return np.matmul(points,proj_mat)/(a**2+b**2+c**2)
def conv_to_2D(points,x,y):
'''Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].'''
mat = [x[0:2],y[0:2]]
mat_inv = np.linalg.inv(mat)
coords = np.matmul(points[:,0:2],mat_inv)
return coords
def cart_to_pol(points):
'''Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].'''
pol = np.empty(points.shape)
pol[:,0] = np.sqrt(points[:,0]**2+points[:,1]**2)
pol[:,1] = np.arctan2(points[:,1],points[:,0])#*57.296
return pol
def ellipse_err(polar_coords,params):
'''Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.'''
a,e,t0 = params
dem = 1+e*np.cos(polar_coords[:,1]-t0)
num = a*(1-e**2)
r = np.divide(num,dem)
err = np.sum((r - polar_coords[:,0])**2)
return err
# Main program
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parameters
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parameters
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
|
leakyrelu
|
LeakyReLU activation function
Args:
x (Tensor): input
leak (float): slope for x < 0
Returns:
Tensor
|
import tensorflow as tf
# MASKED: leakyrelu function (lines 4-16)
|
def leakyrelu(x, leak=0.01):
"""
LeakyReLU activation function
Args:
x (Tensor): input
leak (float): slope for x < 0
Returns:
Tensor
"""
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
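A short usage sketch (the printed values assume eager execution, e.g. TF 2.x; under TF 1.x graph mode the tensor would need a session run):
import tensorflow as tf
x = tf.constant([-2.0, 0.0, 3.0])
y = leakyrelu(x, leak=0.1)
# algebraically: 0.5*(1+leak)*x + 0.5*(1-leak)*|x| equals x for x >= 0 and leak*x for x < 0
print(y)   # -> [-0.2  0.  3.]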
| 4 | 16 |
import tensorflow as tf
def leakyrelu(x, leak=0.01):
"""
LeakyReLU activation function
Args:
x (Tensor): input
leak (float): slope for x < 0
Returns:
Tensor
"""
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
|
est_fpos_rate
|
Estimate false positive rate of a single-token signature.
Estimates using the 'tokensplit' and trace-modeling methods,
and returns the higher (most pessimistic of the two). Note that both
of these estimates are strictly equal to or higher than the actual
fraction of streams that 'token' occurs in within the trace.
|
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
import string
class SigGen(object):
"""
Abstract class for signature generation factories.
"""
def __init__(self, pname="Pretty Name", fname="filename"): pass
def train(self, pos_samples):
"""
Generate one or more signatures from pos_samples (suspicious pool).
Returns a sequence of Sig objects.
"""
raise NotImplementedError
class Sig(object):
"""
Abstract signature class.
"""
def match(self, sample):
"Return whether current signature matches the sample"
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def regex_esc(s):
escaped = []
for c in s:
if c.isalnum():
escaped.append(c)
elif c == ' ':
escaped.append("\\ ")
elif c == "\t":
escaped.append("\\t")
elif c == "\n":
escaped.append("\\n")
elif c == "\r":
escaped.append("\\r")
elif string.punctuation.find(c) >= 0:
escaped.append("\\%s" % c)
else:
escaped.append("\\x%02x" % ord(c))
return ''.join(escaped)
estd_fpos_rate = {} # memoize
# MASKED: est_fpos_rate function (lines 60-95)
|
def est_fpos_rate(token, trace=None, stats=None):
"""
Estimate false positive rate of a single-token signature.
Estimates using the 'tokensplit' and trace-modeling methods,
and returns the higher (most pessimistic of the two). Note that both
of these estimates are strictly equal to or higher than the actual
fraction of streams that 'token' occurs in within the trace.
"""
global estd_fpos_rate
# if we don't have it cached, figure it out
if not (estd_fpos_rate.has_key(trace) and estd_fpos_rate[trace].has_key(token)):
# make sure there's a dictionary for this trace
if not estd_fpos_rate.has_key(trace):
estd_fpos_rate[trace] = {}
# use most pessimistic (highest) estimate
import polygraph.sigprob.tokensplit as tokensplit
import polygraph.sigprob.sigprob as sigprob
if trace:
split_prob = tokensplit.mpp(token, trace, minlen=3)[0]
stat_prob = tokensplit.maxcontextprob(token, trace)[0]
estd_fpos_rate[trace][token] = max(split_prob, stat_prob)
else:
estd_fpos_rate[trace][token] = sigprob.token_prob(token, 1000, stats=stats)[-1]
rv = estd_fpos_rate[trace][token]
# conserve memory
if len(token) > 20:
del estd_fpos_rate[trace][token]
if len(estd_fpos_rate[trace].keys()) > 200:
estd_fpos_rate[trace].clear() # XXX should delete least recently accessed
return rv
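The caching logic above can be read in isolation; here is a minimal sketch of the same memoization pattern with a stand-in estimator (cached_rate and estimate are hypothetical names, not part of Polygraph):
_cache = {}
def cached_rate(token, trace, estimate):
    # stand-in names; mirrors the per-trace memoization used by est_fpos_rate
    per_trace = _cache.setdefault(trace, {})
    if token not in per_trace:
        per_trace[token] = estimate(token)
    rate = per_trace[token]
    # conserve memory, as in the original: drop long keys and cap the table size
    if len(token) > 20:
        del per_trace[token]
    if len(per_trace) > 200:
        per_trace.clear()
    return rate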
| 60 | 95 |
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
import string
class SigGen(object):
"""
Abstract class for signature generation factories.
"""
def __init__(self, pname="Pretty Name", fname="filename"): pass
def train(self, pos_samples):
"""
Generate one or more signatures from pos_samples (suspicious pool).
Returns a sequence of Sig objects.
"""
raise NotImplementedError
class Sig(object):
"""
Abstract signature class.
"""
def match(self, sample):
"Return whether current signature matches the sample"
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def regex_esc(s):
escaped = []
for c in s:
if c.isalnum():
escaped.append(c)
elif c == ' ':
escaped.append("\\ ")
elif c == "\t":
escaped.append("\\t")
elif c == "\n":
escaped.append("\\n")
elif c == "\r":
escaped.append("\\r")
elif string.punctuation.find(c) >= 0:
escaped.append("\\%s" % c)
else:
escaped.append("\\x%02x" % ord(c))
return ''.join(escaped)
estd_fpos_rate = {} # memoize
def est_fpos_rate(token, trace=None, stats=None):
"""
Estimate false positive rate of a single-token signature.
Estimates using the 'tokensplit' and trace-modeling methods,
and returns the higher (most pessimistic of the two). Note that both
of these estimates are strictly equal to or higher than the actual
fraction of streams that 'token' occurs in within the trace.
"""
global estd_fpos_rate
# if we don't have it cached, figure it out
if not (estd_fpos_rate.has_key(trace) and estd_fpos_rate[trace].has_key(token)):
# make sure there's a dictionary for this trace
if not estd_fpos_rate.has_key(trace):
estd_fpos_rate[trace] = {}
# use most pessimistic (highest) estimate
import polygraph.sigprob.tokensplit as tokensplit
import polygraph.sigprob.sigprob as sigprob
if trace:
split_prob = tokensplit.mpp(token, trace, minlen=3)[0]
stat_prob = tokensplit.maxcontextprob(token, trace)[0]
estd_fpos_rate[trace][token] = max(split_prob, stat_prob)
else:
estd_fpos_rate[trace][token] = sigprob.token_prob(token, 1000, stats=stats)[-1]
rv = estd_fpos_rate[trace][token]
# conserve memory
if len(token) > 20:
del estd_fpos_rate[trace][token]
if len(estd_fpos_rate[trace].keys()) > 200:
estd_fpos_rate[trace].clear() # XXX should delete least recently accessed
return rv
|
input_fn
|
Input function which provides a single batch for train or eval.
Returns:
A `tf.data.Dataset` object.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import resnet_preprocessing
class ImageNetInput(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null', not None), then construct a null
pipeline, consisting of empty images.
batch_size: The global batch size to use during training or evaluation.
"""
def __init__(self, is_training, data_dir, batch_size=1024,
use_bfloat16=False):
self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
self.is_training = is_training
self.data_dir = data_dir
if self.data_dir == 'null' or self.data_dir == '':
self.data_dir = None
self.batch_size = batch_size
self.use_bfloat16 = use_bfloat16
def dataset_parser(self, value):
"""Parse an ImageNet record from a serialized string Tensor."""
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, ''),
'image/format':
tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label':
tf.FixedLenFeature([], tf.int64, -1),
'image/class/text':
tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
use_bfloat16=self.use_bfloat16)
# Subtract one so that labels are in [0, 1000), and cast to float32 for
# Keras model.
label = tf.cast(tf.cast(
tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1,
dtype=tf.float32)
return image, label
# MASKED: input_fn function (lines 100-138)
def input_fn_null(self):
"""Input function which provides null (black) images."""
dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input)
dataset = dataset.prefetch(self.batch_size)
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.prefetch(32) # Prefetch overlaps in-feed with training
tf.logging.info('Input dataset: %s', str(dataset))
return dataset
def _get_null_input(self, _):
null_image = tf.zeros([224, 224, 3], tf.float32)
return null_image, tf.constant(0, tf.float32)
|
def input_fn(self):
"""Input function which provides a single batch for train or eval.
Returns:
A `tf.data.Dataset` object.
"""
if self.data_dir is None:
tf.logging.info('Using fake input.')
return self.input_fn_null()
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
fetch_dataset, cycle_length=16, sloppy=True))
dataset = dataset.shuffle(1024)
# Parse, pre-process, and batch the data in parallel
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
self.dataset_parser, batch_size=self.batch_size,
num_parallel_batches=2,
drop_remainder=True))
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
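A hypothetical way to wire this input pipeline into a training loop (the bucket path and batch size are placeholders, and the iterator call follows the TF 1.x style used elsewhere in this file):
imagenet_train = ImageNetInput(is_training=True,
                               data_dir='gs://my-bucket/imagenet',  # placeholder path
                               batch_size=1024,
                               use_bfloat16=False)
dataset = imagenet_train.input_fn()
images, labels = dataset.make_one_shot_iterator().get_next()  # TF 1.x iterator style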
| 100 | 138 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import resnet_preprocessing
class ImageNetInput(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null', not None), then construct a null
pipeline, consisting of empty images.
batch_size: The global batch size to use during training or evaluation.
"""
def __init__(self, is_training, data_dir, batch_size=1024,
use_bfloat16=False):
self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
self.is_training = is_training
self.data_dir = data_dir
if self.data_dir == 'null' or self.data_dir == '':
self.data_dir = None
self.batch_size = batch_size
self.use_bfloat16 = use_bfloat16
def dataset_parser(self, value):
"""Parse an ImageNet record from a serialized string Tensor."""
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, ''),
'image/format':
tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label':
tf.FixedLenFeature([], tf.int64, -1),
'image/class/text':
tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
use_bfloat16=self.use_bfloat16)
# Subtract one so that labels are in [0, 1000), and cast to float32 for
# Keras model.
label = tf.cast(tf.cast(
tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1,
dtype=tf.float32)
return image, label
def input_fn(self):
"""Input function which provides a single batch for train or eval.
Returns:
A `tf.data.Dataset` object.
"""
if self.data_dir is None:
tf.logging.info('Using fake input.')
return self.input_fn_null()
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
fetch_dataset, cycle_length=16, sloppy=True))
dataset = dataset.shuffle(1024)
# Parse, pre-process, and batch the data in parallel
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
self.dataset_parser, batch_size=self.batch_size,
num_parallel_batches=2,
drop_remainder=True))
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
def input_fn_null(self):
"""Input function which provides null (black) images."""
dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input)
dataset = dataset.prefetch(self.batch_size)
dataset = dataset.batch(self.batch_size, drop_remainder=True)
dataset = dataset.prefetch(32) # Prefetch overlaps in-feed with training
tf.logging.info('Input dataset: %s', str(dataset))
return dataset
def _get_null_input(self, _):
null_image = tf.zeros([224, 224, 3], tf.float32)
return null_image, tf.constant(0, tf.float32)
|
login
|
Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
|
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
# MASKED: login function (lines 59-83)
def callback(request):
"""
GitHub redirects here, then the view retrieves a token for the API
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs the user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (also allows hiding the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get recommendation all or cutoff if limit specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar url)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
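A hypothetical urls.py for wiring these views (the route paths are assumptions; the route names match the reverse() calls used in the views above):
from django.urls import path
from webservice import views
urlpatterns = [
    # paths below are illustrative, not taken from the project
    path('', views.index, name='index'),
    path('login/', views.login, name='login'),
    path('callback/', views.callback, name='callback'),
    path('logout/', views.logout, name='logout'),
]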
| 59 | 83 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar url)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirects here, then the view retrieves a token for the API
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs the user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
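# --- Illustrative note (not part of the original view) -----------------------
# For the DEMO flow, recommendations() expects a form POST with a `language`
# field plus a `dependencies` string in whatever format parse_dependencies()
# accepts for that language; both values are stashed in the session and later
# consumed by recommendations_json() when name == DEMO_REPO_INPUT_NAME.
# For the authenticated GET flow, the template receives `recommendation_url`,
# which points back at recommendations_json() for the selected branch.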
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
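# --- Illustrative note (not part of the original view) -----------------------
# A hypothetical response body from recommendations_json() would look like
#   {"repository_name": "owner/repo", "current_branch": "master", "data": [...]}
# where each entry of "data" is one recommendation tuple returned by
# RECOMMENDER_SERVICE.get_recommendations(); the per-field meaning of those
# tuples is spelled out in recommendations_service_api() below
# (forPackage, recommendedPackage, url, pkgpkrScore, ...).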
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
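# --- Illustrative client call (not part of the original view) ----------------
# Assuming this view is routed somewhere like /api/recommendations (the URL
# configuration is not shown here), a client request mirroring the keys the
# view reads (`dependencies`, `language`, optional `max_recommendations`)
# could look like:
#
#   import requests
#   payload = {
#       'language': 'javascript',
#       'dependencies': ['lodash@4.17.15', 'react@16.13.1'],
#       'max_recommendations': 10,
#   }
#   res = requests.post('https://example.org/api/recommendations', json=payload)
#   for rec in res.json()['recommended_dependencies']:
#       print(rec['recommendedPackage'], rec['pkgpkrScore'])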
|
callback
|
GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
|
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
# MASKED: callback function (lines 86-117)
def logout(request):
"""
    Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
    Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
| 86 | 117 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
    Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
    Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
logout
|
Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
|
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# MASKED: logout function (lines 120-135)
def repositories(request):
"""
    Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
def logout(request):
"""
    Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
| 120 | 135 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
    Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
    Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
recommendations
|
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
|
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
    Logs the user out but keeps the OAuth GitHub authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
    Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
# MASKED: recommendations function (lines 188-240)
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
        # Set to None (will also allow for not showing the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15", "react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
        # Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET it means it's not a DEMO POST call with manual dependencies inputs
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
| 188 | 240 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
    GitHub redirects here; the view then retrieves an API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET, this is not a DEMO POST call with manually entered dependencies
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (this also hides the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
recommendations_json
|
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
|
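For illustration, a minimal sketch of exercising the DEMO path of this view with Django's test client follows; the URL '/recommendations/DEMO/json', the routing, and the stored session value format are assumptions inferred from the view code, not taken from the project's urls.py.

import json
from django.test import Client

client = Client()
# Seed the session the way the DEMO POST in recommendations() would (assumed keys and format).
session = client.session
session['language'] = 'javascript'
session['dependencies'] = '"lodash":"4.17.15","react":"16.13.1"'  # hypothetical stored format
session.save()
response = client.get('/recommendations/DEMO/json')  # hypothetical route for this view
payload = json.loads(response.content)
print(payload['repository_name'], payload['current_branch'], len(payload['data']))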
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirects here, then the view retrieves the API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET, this is not a DEMO POST call with manually entered dependencies
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
# MASKED: recommendations_json function (lines 243-286)
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (this also hides the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
| 243 | 286 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirects here, then the view retrieves the API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET, this is not a DEMO POST call with manually entered dependencies
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (this also hides the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
recommendations_service_api
|
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
|
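As a usage illustration, here is a hedged sketch that posts a JSON payload to this unauthenticated API with the requests library; the endpoint path '/api/recommendations' and host are assumptions, while the request keys ('dependencies', 'language', optional 'max_recommendations') and response keys mirror the view code.

import requests

payload = {
    'language': 'javascript',
    'dependencies': ['lodash@4.17.15', 'react@16.13.1'],  # '@' separates package and version
    'max_recommendations': 10,
}
resp = requests.post('http://localhost:8000/api/recommendations', json=payload)  # assumed URL
resp.raise_for_status()
for rec in resp.json()['recommended_dependencies']:
    print(rec['recommendedPackage'], rec['pkgpkrScore'])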
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirects here, then the view retrieves the API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET, this is not a DEMO POST call with manually entered dependencies
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (this also hides the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
# MASKED: recommendations_service_api function (lines 289-369)
|
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
| 289 | 369 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class
RECOMMENDER_SERVICE = RecommenderService()
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
"""
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
"""
return render(request,
"webservice/index.html",
{'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request):
"""
Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
"""
return render(request, "webservice/about.html")
def login(request):
""" Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Create keys if not yet there!
if not request.session.get('github_token'):
request.session['github_token'] = None # To keep API token
request.session['github_info'] = None # To keep user info (e.g. name, avatar URL)
# For Selenium testing
if os.environ.get('SELENIUM_TEST') == '1':
assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
request.session['github_token'] = os.environ.get('GH_TOKEN')
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
# Redirect to attempt Github Auth
return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
"""
GitHub redirects here, then the view retrieves the API token
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Get code supplied by github
code = request.GET.get('code')
# Payload to fetch
payload = {'client_id': GITHUB_CLIENT_ID,
'client_secret': GITHUB_CLIENT_SECRET,
'code': code}
headers = {"accept": "application/json"}
# Call github to get token
res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
data=payload,
headers=headers)
# Set token
request.session['github_token'] = res.json()['access_token']
# Call for user info and store in sessions (to be used for UI)
request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
return HttpResponseRedirect(reverse('index'))
def logout(request):
"""
Logs user out but keeps the GitHub OAuth authorization
arguments:
:request: GET HTTP request
returns:
Redirects to index
"""
# Flush the session
request.session['github_token'] = None
request.session['github_info'] = None
return HttpResponseRedirect(reverse("index"))
def repositories(request):
"""
Get the full list of repositories (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
"""
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Get all repos
repos_per_language = github_util.get_repositories(request.session['github_token'])
combined_repos = dict()
for language, repos in repos_per_language.items():
for repo in repos:
# Skip if repo has no dependencies
if not repo['object']:
continue
# Updated Date
date_time = repo['updatedAt']
# Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
date = date_time.split('T')[0]
repo['date'] = date
# Convert string to encoded URL e.g. hello/world -> hello%2Fworld
repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])
repo['language'] = language
# Get dependencies if any, remember if at least some dependencies found
if parse_dependencies(repo['object']['text'], language, True):
combined_repos[repo['nameWithOwner']] = repo
return render(request, "webservice/repositories.html", {
'repos': combined_repos.values()
})
def recommendations(request, name):
"""
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
# Process for DEMO run
if request.method == 'POST':
language = request.POST.get('language')
language = language.lower()
dependencies = request.POST.get('dependencies')
dependencies = dependencies.strip(',')
if language not in SUPPORTED_LANGUAGES.keys():
return HttpResponse(f'Demo language {language} not supported', status=404)
request.session['dependencies'] = dependencies
request.session['language'] = language
branch_name = None
branch_names = None
# If GET, this is not a DEMO POST call with manually entered dependencies
else:
# Assure login
if not request.session.get('github_token'):
return HttpResponseRedirect(reverse("index"))
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get branch names and language (ONLY) for the repo, no need for dependencies yet
_, branch_names, language = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
return render(request, "webservice/recommendations.html", {
'repository_name': repo_name,
'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
'branch_names': branch_names,
'current_branch': branch_name,
'language': language
})
def recommendations_json(request, name):
"""
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
"""
# Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
repo_name = urllib.parse.unquote_plus(name)
if name == DEMO_REPO_INPUT_NAME:
dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
request.session.get('language'))
# Set to None (this also hides the branch selector)
branch_name = None
else:
if not request.session.get('github_token'):
return HttpResponse('Unauthorized', status=401)
# Fetch branch name out of HTTP GET Param
branch_name = request.GET.get('branch', default='master')
# Get dependencies for current repo, and branch names for the repo
dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
repo_name,
branch_name)
# Get predictions
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Setup data to be returned
data = {
'repository_name': repo_name,
'current_branch': branch_name,
'data': recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
"""
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
"""
if request.method == 'POST':
# Fetch JSON
try:
json_data = json.loads(request.body) # request.raw_post_data w/ Django < 1.4
except json.JSONDecodeError:
return HttpResponseBadRequest("Could not parse JSON.")
# Fetch non-optional keys
try:
dependencies = json_data['dependencies']
language = json_data['language'].lower()
except KeyError:
return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
except AttributeError as e:
return HttpResponseBadRequest(f'Error casting language to lower(): {e}')
# Assure proper inputs
if not isinstance(dependencies, list) or not dependencies:
return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')
# Convert comma separated dependencies into proper expected format
if language == PYTHON:
dependencies = '\n'.join(dependencies)
elif language == JAVASCRIPT:
# Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
dependencies = ','.join(formatted_dependencies_list)
else:
return HttpResponseBadRequest(f"Language not supported: [{language}].")
# Parse dependencies
dependencies = github_util.parse_dependencies(dependencies, language)
# Get all recommendations, or cut off at the limit if one is specified
if 'max_recommendations' in json_data:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
json_data['max_recommendations'])
else:
recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)
# Convert the output tuples into list of dictionaries with names to return back
output_recommended_dependencies = []
for recommended_dependency in recommended_dependencies:
d = dict()
d['forPackage'] = recommended_dependency[0]
d['recommendedPackage'] = recommended_dependency[1]
d['url'] = recommended_dependency[2]
d['pkgpkrScore'] = recommended_dependency[3]
d['absoluteTrendScore'] = recommended_dependency[4]
d['relativeTrendScore'] = recommended_dependency[5]
d['boundedPopularityScore'] = recommended_dependency[6]
d['boundedSimilarityScore'] = recommended_dependency[7]
d['categories'] = recommended_dependency[8]
d['displayDate'] = recommended_dependency[9]
d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
output_recommended_dependencies.append(d)
# Setup data to be returned
data = {
'language': language,
'recommended_dependencies': output_recommended_dependencies
}
return HttpResponse(json.dumps(data), content_type="application/json")
return HttpResponseNotAllowed(['POST'])
|
link_iterable_by_fields
|
Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, limit objects in ``unlinked`` of type ``kind``.
If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.
|
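A minimal usage sketch with in-memory toy data, assuming linking on the ``name`` and ``unit`` fields only; the field choice and dataset values are illustrative, not prescribed by the strategy.

# Target datasets must carry ``database`` and ``code`` so they can be referenced.
target = [
    {'database': 'target-db', 'code': 'steel-1', 'name': 'steel', 'unit': 'kilogram'},
]
unlinked = [
    {
        'database': 'source-db',
        'code': 'car-1',
        'name': 'car',
        'exchanges': [
            {'type': 'technosphere', 'name': 'steel', 'unit': 'kilogram', 'amount': 800},
        ],
    },
]
linked = link_iterable_by_fields(unlinked, other=target, fields=['name', 'unit'])
# The matching exchange now points at the target dataset.
assert linked[0]['exchanges'][0]['input'] == ('target-db', 'steel-1')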
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
# MASKED: link_iterable_by_fields function (lines 25-71)
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, limit objects in ``unlinked`` of type ``kind``.
If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
| 25 | 71 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, limit objects in ``unlinked`` of type ``kind``.
If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
assign_only_product_as_production
|
Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, only link objects in ``unlinked`` whose ``type`` is in ``kind``.
If ``relink``, also relink objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
# MASKED: assign_only_product_as_production function (lines 74-97)
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
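A short usage sketch relying only on the behaviour shown above (the dataset is fabricated):
# A dataset with exactly one production exchange and no reference product yet.
db = [{
    "name": "steel production",
    "exchanges": [
        {"type": "production", "name": "steel", "unit": "kilogram", "amount": 1.0},
        {"type": "technosphere", "name": "electricity", "amount": 0.5},
    ],
}]
db = assign_only_product_as_production(db)
assert db[0]["reference product"] == "steel"
assert db[0]["production amount"] == 1.0
assert db[0]["unit"] == "kilogram"          # taken from the product, as the dataset had no unit
assert db[0]["name"] == "steel production"  # existing name is kept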
| 74 | 97 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, only link objects in ``unlinked`` whose ``type`` is in ``kind``.
If ``relink``, also relink objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
link_technosphere_by_activity_hash
|
Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields.
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, only link objects in ``unlinked`` whose ``type`` is in ``kind``.
If ``relink``, also relink objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
# MASKED: link_technosphere_by_activity_hash function (lines 100-117)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
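A hypothetical internal-linking sketch (``external_db_name`` is omitted, so no bw2data project is touched); as before, it assumes ``activity_hash`` matches when the listed ``fields`` agree:
# Illustrative data only: the production exchange links back to its own dataset.
db = [{
    "database": "example", "code": "a",
    "name": "widget", "unit": "unit", "location": "GLO",
    "exchanges": [
        {"type": "production", "name": "widget", "unit": "unit",
         "location": "GLO", "amount": 1.0},
    ],
}]
db = link_technosphere_by_activity_hash(db, fields=["name", "unit", "location"])
assert db[0]["exchanges"][0]["input"] == ("example", "a")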
| 100 | 117 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
fields_to_print = list(fields or DEFAULT_FIELDS) + ['filename']
_ = lambda x: {field: x.get(field, "(missing)") for field in fields_to_print}
return template.format(
ds=pprint.pformat(_(obj)),
targets=pprint.pformat([_(x) for x in others])
)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
internal=False, relink=False):
"""Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, only link objects in ``unlinked`` whose ``type`` is in ``kind``.
If ``relink``, also relink objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``."""
if kind:
kind = {kind} if isinstance(kind, str) else kind
if relink:
filter_func = lambda x: x.get('type') in kind
else:
filter_func = lambda x: x.get('type') in kind and not x.get('input')
else:
if relink:
filter_func = lambda x: True
else:
filter_func = lambda x: not x.get('input')
if internal:
other = unlinked
duplicates, candidates = {}, {}
try:
# Other can be a generator, so a bit convoluted
for ds in other:
key = activity_hash(ds, fields)
if key in candidates:
duplicates.setdefault(key, []).append(ds)
else:
candidates[key] = (ds['database'], ds['code'])
except KeyError:
raise StrategyError("Not all datasets in database to be linked have "
"``database`` or ``code`` attributes")
for container in unlinked:
for obj in filter(filter_func, container.get('exchanges', [])):
key = activity_hash(obj, fields)
if key in duplicates:
raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
elif key in candidates:
obj['input'] = candidates[key]
return unlinked
def assign_only_product_as_production(db):
"""Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
"""
for ds in db:
if ds.get("reference product"):
continue
products = [x for x in ds.get('exchanges', []) if x.get('type') == 'production']
if len(products) == 1:
product = products[0]
assert product['name']
ds['reference product'] = product['name']
ds['production amount'] = product['amount']
ds['name'] = ds.get('name') or product['name']
ds['unit'] = ds.get('unit') or product.get('unit') or 'Unknown'
return db
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
"""Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields."""
TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
if external_db_name is not None:
if external_db_name not in databases:
raise StrategyError("Can't find external database {}".format(
external_db_name))
other = (obj for obj in Database(external_db_name)
if obj.get('type', 'process') == 'process')
internal = False
else:
other = None
internal = True
return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
"""Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``."""
for ds in db:
if 'code' not in ds or overwrite:
ds['code'] = activity_hash(ds)
return db
def tupleize_categories(db):
for ds in db:
if ds.get('categories'):
ds['categories'] = tuple(ds['categories'])
for exc in ds.get('exchanges', []):
if exc.get('categories'):
exc['categories'] = tuple(exc['categories'])
return db
def drop_unlinked(db):
"""This is the nuclear option - use at your own risk!"""
for ds in db:
ds['exchanges'] = [obj for obj in ds['exchanges'] if obj.get('input')]
return db
def normalize_units(db):
"""Normalize units in datasets and their exchanges"""
for ds in db:
if 'unit' in ds:
ds['unit'] = normalize_units_function(ds['unit'])
for exc in ds.get('exchanges', []):
if 'unit' in exc:
exc['unit'] = normalize_units_function(exc['unit'])
for param in ds.get('parameters', {}).values():
if 'unit' in param:
param['unit'] = normalize_units_function(param['unit'])
return db
def add_database_name(db, name):
"""Add database name to datasets"""
for ds in db:
ds['database'] = name
return db
def convert_uncertainty_types_to_integers(db):
"""Generic number conversion function convert to floats. Return to integers."""
for ds in db:
for exc in ds['exchanges']:
try:
exc['uncertainty type'] = int(exc['uncertainty type'])
except:
pass
return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
"""Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
"""
uncertainty_fields = [
'minimum',
'maximum',
'scale',
'shape',
'loc',
]
def drop_if_appropriate(exc):
for field in uncertainty_fields:
if field not in exc or exc[field] == 0:
continue
elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
continue
elif not exc[field]:
del exc[field]
for ds in db:
for exc in ds['exchanges']:
drop_if_appropriate(exc)
return db
def convert_activity_parameters_to_list(data):
"""Convert activity parameters from dictionary to list of dictionaries"""
def _(key, value):
dct = deepcopy(value)
dct['name'] = key
return dct
for ds in data:
if 'parameters' in ds:
ds['parameters'] = [_(x, y) for x, y in ds['parameters'].items()]
return data
|
AccountListVolumes
|
Show the list of volumes for an account
Args:
account_name: the name of the account
account_id: the ID of the account
by_id: show volume IDs instead of names
mvip: the management IP of the cluster
username: the admin user of the cluster
password: the admin password of the cluster
output_format: the format to display the information
|
#!/usr/bin/env python
"""
This action will display a list of volumes for an account
"""
from libsf.apputil import PythonApp
from libsf.argutil import SFArgumentParser, GetFirstLine, SFArgFormatter
from libsf.logutil import GetLogger, logargs
from libsf.sfcluster import SFCluster
from libsf.util import ValidateAndDefault, NameOrID, IPv4AddressType, BoolType, StrType, OptionalValueType, SelectionType, SolidFireIDType
from libsf import sfdefaults
from libsf import SolidFireError, UnknownObjectError
import sys
import json
# MASKED: AccountListVolumes function (lines 17-87)
if __name__ == '__main__':
parser = SFArgumentParser(description=GetFirstLine(__doc__), formatter_class=SFArgFormatter)
parser.add_cluster_mvip_args()
parser.add_account_selection_args()
parser.add_argument("--byid", action="store_true", default=False, dest="by_id", help="display volume IDs instead of volume names")
parser.add_console_format_args()
args = parser.parse_args_to_dict()
app = PythonApp(AccountListVolumes, args)
app.Run(**args)
|
@logargs
@ValidateAndDefault({
# "arg_name" : (arg_type, arg_default)
"account_name" : (OptionalValueType(StrType), None),
"account_id" : (OptionalValueType(SolidFireIDType), None),
"by_id" : (BoolType, False),
"mvip" : (IPv4AddressType, sfdefaults.mvip),
"username" : (StrType, sfdefaults.username),
"password" : (StrType, sfdefaults.password),
"output_format" : (OptionalValueType(SelectionType(sfdefaults.all_output_formats)), None),
})
def AccountListVolumes(account_name,
account_id,
by_id,
mvip,
username,
password,
output_format):
"""
Show the list of volumes for an account
Args:
account_name: the name of the account
account_id: the ID of the account
by_id: show volume IDs instead of names
mvip: the management IP of the cluster
username: the admin user of the cluster
password: the admin password of the cluster
output_format: the format to display the information
"""
log = GetLogger()
NameOrID(account_name, account_id, "account")
log.info("Searching for accounts")
try:
account = SFCluster(mvip, username, password).FindAccount(accountName=account_name, accountID=account_id)
except UnknownObjectError:
log.error("Account does not exists")
return False
except SolidFireError as e:
log.error("Could not search for accounts: {}".format(e))
return False
log.info("Searching for volumes")
try:
all_volumes = SFCluster(mvip, username, password).ListActiveVolumes()
all_volumes += SFCluster(mvip, username, password).ListDeletedVolumes()
except SolidFireError as e:
log.error("Could not search for volumes: {}".format(e))
return False
all_volumes = {vol["volumeID"] : vol for vol in all_volumes}
attr = "name"
if by_id:
attr = "volumeID"
account_volumes = [all_volumes[vid][attr] for vid in account.volumes]
# Display the list in the requested format
if output_format and output_format == "bash":
sys.stdout.write(" ".join([str(item) for item in account_volumes]) + "\n")
sys.stdout.flush()
elif output_format and output_format == "json":
sys.stdout.write(json.dumps({"volumes" : account_volumes}) + "\n")
sys.stdout.flush()
else:
log.info("{} volumes in account {}".format(len(account.volumes), account.username))
if account.volumes:
log.info(" {}".format(", ".join([str(item) for item in account_volumes])))
return True
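Besides the script's argparse entry point, the decorated function can be called directly. A hypothetical invocation (the cluster address, credentials and account name are placeholders, and "json" is assumed to be one of the accepted output formats):
# Illustrative call only - this contacts a real cluster at the given MVIP.
ok = AccountListVolumes(account_name="tenant01",
                        account_id=None,
                        by_id=False,
                        mvip="10.0.0.1",
                        username="admin",
                        password="admin-password",
                        output_format="json")
# Writes {"volumes": [...]} to stdout and returns True on success, False on any error.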
| 17 | 87 |
#!/usr/bin/env python
"""
This action will display a list of volumes for an account
"""
from libsf.apputil import PythonApp
from libsf.argutil import SFArgumentParser, GetFirstLine, SFArgFormatter
from libsf.logutil import GetLogger, logargs
from libsf.sfcluster import SFCluster
from libsf.util import ValidateAndDefault, NameOrID, IPv4AddressType, BoolType, StrType, OptionalValueType, SelectionType, SolidFireIDType
from libsf import sfdefaults
from libsf import SolidFireError, UnknownObjectError
import sys
import json
@logargs
@ValidateAndDefault({
# "arg_name" : (arg_type, arg_default)
"account_name" : (OptionalValueType(StrType), None),
"account_id" : (OptionalValueType(SolidFireIDType), None),
"by_id" : (BoolType, False),
"mvip" : (IPv4AddressType, sfdefaults.mvip),
"username" : (StrType, sfdefaults.username),
"password" : (StrType, sfdefaults.password),
"output_format" : (OptionalValueType(SelectionType(sfdefaults.all_output_formats)), None),
})
def AccountListVolumes(account_name,
account_id,
by_id,
mvip,
username,
password,
output_format):
"""
Show the list of volumes for an account
Args:
account_name: the name of the account
account_id: the ID of the account
by_id: show volume IDs instead of names
mvip: the management IP of the cluster
username: the admin user of the cluster
password: the admin password of the cluster
output_format: the format to display the information
"""
log = GetLogger()
NameOrID(account_name, account_id, "account")
log.info("Searching for accounts")
try:
account = SFCluster(mvip, username, password).FindAccount(accountName=account_name, accountID=account_id)
except UnknownObjectError:
log.error("Account does not exists")
return False
except SolidFireError as e:
log.error("Could not search for accounts: {}".format(e))
return False
log.info("Searching for volumes")
try:
all_volumes = SFCluster(mvip, username, password).ListActiveVolumes()
all_volumes += SFCluster(mvip, username, password).ListDeletedVolumes()
except SolidFireError as e:
log.error("Could not search for volumes: {}".format(e))
return False
all_volumes = {vol["volumeID"] : vol for vol in all_volumes}
attr = "name"
if by_id:
attr = "volumeID"
account_volumes = [all_volumes[vid][attr] for vid in account.volumes]
# Display the list in the requested format
if output_format and output_format == "bash":
sys.stdout.write(" ".join([str(item) for item in account_volumes]) + "\n")
sys.stdout.flush()
elif output_format and output_format == "json":
sys.stdout.write(json.dumps({"volumes" : account_volumes}) + "\n")
sys.stdout.flush()
else:
log.info("{} volumes in account {}".format(len(account.volumes), account.username))
if account.volumes:
log.info(" {}".format(", ".join([str(item) for item in account_volumes])))
return True
if __name__ == '__main__':
parser = SFArgumentParser(description=GetFirstLine(__doc__), formatter_class=SFArgFormatter)
parser.add_cluster_mvip_args()
parser.add_account_selection_args()
parser.add_argument("--byid", action="store_true", default=False, dest="by_id", help="display volume IDs instead of volume names")
parser.add_console_format_args()
args = parser.parse_args_to_dict()
app = PythonApp(AccountListVolumes, args)
app.Run(**args)
|
_add_processname_features
|
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from ..common.exceptions import MsticpyImportExtraError
from ..common.utility import export
from .._version import VERSION
try:
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
except ImportError as imp_err:
raise MsticpyImportExtraError(
"Cannot use this feature without Sklearn and matplotlib installed",
title="Error importing Scikit Learn and matplotlib",
extra="ml",
) from imp_err
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
data: Any,
cluster_columns: List[Any] = None,
verbose: bool = False,
normalize: bool = True,
time_column: str = "TimeCreatedUtc",
max_cluster_distance: float = 0.01,
min_cluster_samples: int = 2,
**kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
"""
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
"""
allowed_types = [np.ndarray, pd.DataFrame]
x_input = None
if isinstance(data, pd.DataFrame):
if cluster_columns is None:
x_input = data.values
else:
x_input = data[cluster_columns].values
elif isinstance(data, np.ndarray):
        x_input = data if cluster_columns is None else data[:, cluster_columns]
if x_input is None:
mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
type_list = ", ".join(str(t) for t in allowed_types)
mssg = mssg.format(str(type(data)), type_list)
raise ValueError(mssg)
# Create DBSCAN cluster object
db_cluster = DBSCAN(
eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
)
# Normalize the data (most clustering algorithms don't do well with
# unnormalized data)
x_norm = Normalizer().fit_transform(x_input) if normalize else x_input
# fit the data set
db_cluster.fit(x_norm)
labels = db_cluster.labels_
cluster_set, counts = np.unique(labels, return_counts=True)
if verbose:
print(
"Clustering for set size ",
len(x_norm),
" - ",
len(cluster_set),
" clusters",
)
print("Individual cluster sizes: ", ", ".join(str(c) for c in counts))
clustered_events = _merge_clustered_items(
cluster_set, labels, data, time_column, counts
)
if verbose:
print("Cluster output rows: ", len(clustered_events))
return clustered_events, db_cluster, x_norm
def _merge_clustered_items(
cluster_set: np.array,
labels: np.array,
data: Union[pd.DataFrame, np.array],
time_column: str,
counts: np.array,
) -> pd.DataFrame:
"""
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
"""
tz_aware = data.iloc[0][time_column].tz
ts_type = "datetime64[ns, UTC]" if tz_aware is not None else "datetime64[ns]"
cluster_list = []
# Iterate through clusters, adding exemplar to output frame
# pylint: disable=consider-using-enumerate
# we need to know the index of the item within the loop
for idx in range(len(cluster_set)):
cluster_id = cluster_set[idx]
class_members = labels == cluster_id
if isinstance(data, pd.DataFrame):
time_ordered = data[class_members].sort_values(time_column, ascending=True)
first_event_time = time_ordered[0:][time_column].iat[0]
last_event_time = time_ordered[-1:][time_column].iat[0]
else:
first_event_time = None
last_event_time = None
if cluster_id == -1:
# 'Noise' events are individual items that could not be assigned
# to a cluster and so are unique
cluster_list.append(
data[class_members]
.assign(
Clustered=False,
ClusterId=cluster_id,
ClusterSize=1,
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
else:
# Otherwise, just choose the first example of the cluster set
cluster_list.append(
data[class_members]
.assign(
Clustered=True,
ClusterId=cluster_id,
ClusterSize=counts[idx],
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)[0:1]
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
# pylint: enable=consider-using-enumerate
return pd.concat(cluster_list)
@export
def add_process_features(
input_frame: pd.DataFrame, path_separator: str = None, force: bool = False
) -> pd.DataFrame:
r"""
Add numerical features based on patterns of command line and process name.
Parameters
----------
input_frame : pd.DataFrame
The input dataframe
path_separator : str, optional
Path separator. If not supplied, try to determine
from 'NewProcessName' column of first 10 rows
(the default is None)
force : bool, optional
Forces re-calculation of feature columns even if they
already exist (the default is False)
Returns
-------
pd.DataFrame
Copy of the dataframe with the additional numeric features
Notes
-----
Features added:
- processNameLen: length of process file name (inc path)
- processNameTokens: the number of elements in the path
- processName: the process file name (minus path)
- commandlineTokens: number of space-separated tokens in the command line
- commandlineLen: length of the command line
- commandlineLogLen: log10 length of commandline
- isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
- commandlineTokensFull: counts number of token separators in commandline
[\\s\-\\/\.,"\'\|&:;%$()]
- pathScore: sum of ord() value of characters in path
- pathLogScore: log10 of pathScore
- commandlineScore: sum of ord() value of characters in commandline
- commandlineLogScore: log10 of commandlineScore
"""
output_df = input_frame.copy()
# Set any NaN values to empty string
if "NewProcessName" in output_df and "CommandLine" in output_df:
output_df[["NewProcessName", "CommandLine"]] = output_df[
["NewProcessName", "CommandLine"]
].fillna(value="")
# try to determine the path separator
if path_separator is None:
sample_df = output_df.head(10)
lx_path = len(sample_df[sample_df["NewProcessName"].str.contains("/")])
path_separator = "/" if lx_path else "\\"
# Create features from process name and command line
if "NewProcessName" in output_df:
_add_processname_features(output_df, force, path_separator)
if "CommandLine" in output_df:
_add_commandline_features(output_df, force)
if "SubjectLogonId" in output_df and ("isSystemSession" not in output_df or force):
output_df["isSystemSession"] = output_df["SubjectLogonId"].isin(["0x3e7", "-1"])
return output_df
# MASKED: _add_processname_features function (lines 316-347)
def _add_commandline_features(output_df: pd.DataFrame, force: bool):
"""
Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
"""
if "commandlineLen" not in output_df or force:
output_df["commandlineLen"] = output_df.apply(
lambda x: len(x.CommandLine), axis=1
)
if "commandlineLogLen" not in output_df or force:
output_df["commandlineLogLen"] = output_df.apply(
lambda x: log10(x.commandlineLen) if x.commandlineLen else 0, axis=1
)
if "commandlineTokensFull" not in output_df or force:
output_df["commandlineTokensFull"] = output_df[["CommandLine"]].apply(
lambda x: delim_count(x.CommandLine), axis=1
)
if "commandlineScore" not in output_df or force:
output_df["commandlineScore"] = output_df.apply(
lambda x: char_ord_score(x.CommandLine), axis=1
)
if "commandlineTokensHash" not in output_df or force:
output_df["commandlineTokensHash"] = output_df.apply(
lambda x: delim_hash(x.CommandLine), axis=1
)
@export
@lru_cache(maxsize=1024)
def delim_count(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Count the delimiters in input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Count of delimiters in the string.
"""
return len(re.findall(delim_list, value))
@export
@lru_cache(maxsize=1024)
def delim_hash(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Return a hash (CRC32) of the delimiters from input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Hash of delimiter set in the string.
"""
return crc32(bytes("".join(re.findall(delim_list, value)), "utf-8"))
@export
@lru_cache(maxsize=1024)
def char_ord_score(value: str, scale: int = 1) -> int:
"""
Return sum of ord values of characters in string.
Parameters
----------
value : str
Data to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations this feature on the clustering
algorithm (the default is 1)
Returns
-------
int
Sum of the ordinal values of the characters in ``value``, divided by ``scale``.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return floor(sum(ord(x) for x in value) / scale)
@export
@lru_cache(maxsize=1024)
def token_count(value: str, delimiter: str = " ") -> int:
"""
Return count of delimiter-separated tokens pd.Series column.
Parameters
----------
value : str
Data to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
int
count of tokens
"""
return len(value.split(delimiter))
def _string_score(input_str):
"""Sum the ord(c) for characters in a string."""
return sum(ord(x) for x in input_str)
@export
@lru_cache(maxsize=1024)
def crc32_hash(value: str) -> int:
"""
Return the CRC32 hash of the input column.
Parameters
----------
value : str
Data to process
Returns
-------
int
CRC32 hash
"""
return crc32(bytes(value.encode("utf-8")))
@export
def delim_count_df(
data: pd.DataFrame, column: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]'
) -> pd.Series:
r"""
Count the delimiters in input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
The name of the column to process
delim_list : str, optional
delimiters to use. (the default is r\'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]\')
Returns
-------
pd.Series
Count of delimiters in the string in `column`.
"""
return data[column].str.count(delim_list)
@export
def char_ord_score_df(data: pd.DataFrame, column: str, scale: int = 1) -> pd.Series:
"""
Return sum of ord values of characters in string.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations this feature on the clustering
algorithm (the default is 1)
Returns
-------
pd.Series
The sum of the ordinal values of the characters
in `column`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return data.apply(lambda x: sum(ord(char) for char in x[column]) / scale, axis=1)
@export
def token_count_df(data: pd.DataFrame, column: str, delimiter: str = " ") -> pd.Series:
"""
Return count of delimiter-separated tokens pd.Series column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
pd.Series
count of tokens in strings in `column`
"""
return data.apply(lambda x: len(x[column].split(delimiter)), axis=1)
@export
def crc32_hash_df(data: pd.DataFrame, column: str) -> pd.Series:
"""
Return the CRC32 hash of the input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
Returns
-------
pd.Series
CRC32 hash of input column
"""
return data.apply(lambda x: crc32(bytes(x[column].encode("utf-8"))), axis=1)
# pylint: disable=too-many-arguments, too-many-statements
@export # noqa: C901, MC0001
def plot_cluster(
db_cluster: DBSCAN,
data: pd.DataFrame,
x_predict: np.ndarray,
plot_label: str = None,
plot_features: Tuple[int, int] = (0, 1),
verbose: bool = False,
cut_off: int = 3,
xlabel: str = None,
ylabel: str = None,
):
"""
Plot clustered data as scatter chart.
Parameters
----------
db_cluster : DBSCAN
DBScan Cluster (from SkLearn DBSCAN).
data : pd.DataFrame
Dataframe containing original data.
x_predict : np.ndarray
The DBSCAN predict numpy array
plot_label : str, optional
If set the column to use to label data points
(the default is None)
plot_features : Tuple[int, int], optional
Which two features in x_predict to plot (the default is (0, 1))
verbose : bool, optional
Verbose execution with some extra info
(the default is False)
cut_off : int, optional
The cluster size below which items are considered outliers
(the default is 3)
xlabel : str, optional
x-axis label (the default is None)
ylabel : str, optional
y-axis label (the default is None)
"""
max_idx = x_predict.shape[1] - 1
if plot_features[0] >= x_predict.shape[1]:
raise ValueError(
"plot_features[0] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[1] >= x_predict.shape[1]:
raise ValueError(
"plot_features[1] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[0] == plot_features[1]:
mssg = "plot_features indexes must be 2 different values in range 0 to"
raise ValueError(mssg + f" {max_idx}.")
labels = db_cluster.labels_
core_samples_mask = np.zeros_like(labels, dtype=bool)
# pylint: disable=unsupported-assignment-operation
# (assignment of numpy array is valid)
core_samples_mask[db_cluster.core_sample_indices_] = True
unique_labels = set(labels)
# pylint: disable=no-member
# Spectral color map does exist
colors = [cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
_, counts = np.unique(labels, return_counts=True)
if verbose:
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(x_predict, labels))
if (
not isinstance(data, pd.DataFrame)
or plot_label is not None
and plot_label not in data
):
plot_label = None
p_label = None
for cluster_id, color in zip(unique_labels, colors):
if cluster_id == -1:
# Black used for noise.
color = [0, 0, 0, 1]
class_member_mask = labels == cluster_id
cluster_size = counts[cluster_id]
marker_size = cluster_size
marker = "o"
font_size = "small"
alpha = 0.4
if cluster_size < cut_off:
marker = "+"
marker_size = 10
font_size = "large"
alpha = 1.0
xy_pos = x_predict[class_member_mask & core_samples_mask]
plt.plot(
xy_pos[:, plot_features[0]],
xy_pos[:, plot_features[1]],
marker,
markerfacecolor=tuple(color),
markersize=marker_size,
)
if plot_label:
first_row = data[class_member_mask].iloc[0]
if not first_row.empty and plot_label in first_row:
p_label = first_row[plot_label]
try:
plt.annotate(
p_label,
xy=(xy_pos[0, plot_features[0]], xy_pos[0, plot_features[1]]),
fontsize=font_size,
alpha=alpha,
)
except IndexError:
pass
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
return plt
|
def _add_processname_features(
output_df: pd.DataFrame, force: bool, path_separator: str
):
"""
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
"""
if "processName" not in output_df or force:
output_df["processName"] = output_df.apply(
lambda x: x.NewProcessName.split(path_separator)[-1], axis=1
)
if "pathScore" not in output_df or force:
output_df["pathScore"] = output_df.apply(
lambda x: char_ord_score(x.NewProcessName), axis=1
)
if "pathLogScore" not in output_df or force:
output_df["pathLogScore"] = output_df.apply(
lambda x: log10(x.pathScore) if x.pathScore else 0, axis=1
)
if "pathHash" not in output_df or force:
output_df["pathHash"] = output_df.apply(
lambda x: crc32_hash(x.NewProcessName), axis=1
)
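A small end-to-end sketch tying the helpers in this module together (the event frame is fabricated; column names follow the docstrings above):
import pandas as pd

# Fabricated process events - two identical cmd.exe launches and one other process.
events = pd.DataFrame({
    "NewProcessName": [r"C:\Windows\System32\cmd.exe",
                       r"C:\Windows\System32\cmd.exe",
                       r"C:\Tools\scanner.exe"],
    "CommandLine": ["cmd /c whoami", "cmd /c whoami", "scanner.exe --all --verbose"],
    "SubjectLogonId": ["0x3e7", "0x3e7", "0x5a2b"],
    "TimeCreatedUtc": pd.to_datetime(["2021-01-01T00:00:00Z"] * 3),
})
# Derive numeric feature columns, then cluster on a few of them.
feats = add_process_features(events)
clustered, model, x_norm = dbcluster_events(
    feats,
    cluster_columns=["commandlineLen", "commandlineTokensFull", "pathScore"],
    time_column="TimeCreatedUtc",
)
# Rows with identical features always share a cluster; the summary frame keeps one
# exemplar row per cluster plus one row for each unclustered (noise) event.
print(clustered[["ClusterId", "ClusterSize", "processName"]])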
| 316 | 347 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from ..common.exceptions import MsticpyImportExtraError
from ..common.utility import export
from .._version import VERSION
try:
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
except ImportError as imp_err:
raise MsticpyImportExtraError(
"Cannot use this feature without Sklearn and matplotlib installed",
title="Error importing Scikit Learn and matplotlib",
extra="ml",
) from imp_err
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
data: Any,
cluster_columns: List[Any] = None,
verbose: bool = False,
normalize: bool = True,
time_column: str = "TimeCreatedUtc",
max_cluster_distance: float = 0.01,
min_cluster_samples: int = 2,
**kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
"""
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
"""
allowed_types = [np.ndarray, pd.DataFrame]
x_input = None
if isinstance(data, pd.DataFrame):
if cluster_columns is None:
x_input = data.values
else:
x_input = data[cluster_columns].values
elif isinstance(data, np.ndarray):
x_input = data if cluster_columns is None else data[:, cluster_columns].values
if x_input is None:
mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
type_list = ", ".join(str(t) for t in allowed_types)
mssg = mssg.format(str(type(data)), type_list)
raise ValueError(mssg)
# Create DBSCAN cluster object
db_cluster = DBSCAN(
eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
)
# Normalize the data (most clustering algorithms don't do well with
# unnormalized data)
x_norm = Normalizer().fit_transform(x_input) if normalize else x_input
# fit the data set
db_cluster.fit(x_norm)
labels = db_cluster.labels_
cluster_set, counts = np.unique(labels, return_counts=True)
if verbose:
print(
"Clustering for set size ",
len(x_norm),
" - ",
len(cluster_set),
" clusters",
)
print("Individual cluster sizes: ", ", ".join(str(c) for c in counts))
clustered_events = _merge_clustered_items(
cluster_set, labels, data, time_column, counts
)
if verbose:
print("Cluster output rows: ", len(clustered_events))
return clustered_events, db_cluster, x_norm
def _merge_clustered_items(
cluster_set: np.array,
labels: np.array,
data: Union[pd.DataFrame, np.array],
time_column: str,
counts: np.array,
) -> pd.DataFrame:
"""
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
"""
tz_aware = data.iloc[0][time_column].tz
ts_type = "datetime64[ns, UTC]" if tz_aware is not None else "datetime64[ns]"
cluster_list = []
# Iterate through clusters, adding exemplar to output frame
# pylint: disable=consider-using-enumerate
# we need to know the index of the item within the loop
for idx in range(len(cluster_set)):
cluster_id = cluster_set[idx]
class_members = labels == cluster_id
if isinstance(data, pd.DataFrame):
time_ordered = data[class_members].sort_values(time_column, ascending=True)
first_event_time = time_ordered[0:][time_column].iat[0]
last_event_time = time_ordered[-1:][time_column].iat[0]
else:
first_event_time = None
last_event_time = None
if cluster_id == -1:
# 'Noise' events are individual items that could not be assigned
# to a cluster and so are unique
cluster_list.append(
data[class_members]
.assign(
Clustered=False,
ClusterId=cluster_id,
ClusterSize=1,
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
else:
# Otherwise, just choose the first example of the cluster set
cluster_list.append(
data[class_members]
.assign(
Clustered=True,
ClusterId=cluster_id,
ClusterSize=counts[idx],
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)[0:1]
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
# pylint: enable=consider-using-enumerate
return pd.concat(cluster_list)
@export
def add_process_features(
input_frame: pd.DataFrame, path_separator: str = None, force: bool = False
) -> pd.DataFrame:
r"""
Add numerical features based on patterns of command line and process name.
Parameters
----------
input_frame : pd.DataFrame
The input dataframe
path_separator : str, optional
Path separator. If not supplied, try to determine
from 'NewProcessName' column of first 10 rows
(the default is None)
force : bool, optional
Forces re-calculation of feature columns even if they
already exist (the default is False)
Returns
-------
pd.DataFrame
Copy of the dataframe with the additional numeric features
Notes
-----
Features added:
- processNameLen: length of process file name (inc path)
- processNameTokens: the number of elements in the path
- processName: the process file name (minus path)
- commandlineTokens: number of space-separated tokens in the command line
- commandlineLen: length of the command line
- commandlineLogLen: log10 length of commandline
- isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
- commandlineTokensFull: counts number of token separators in commandline
[\\s\-\\/\.,"\'\|&:;%$()]
- pathScore: sum of ord() value of characters in path
- pathLogScore: log10 of pathScore
- commandlineScore: sum of ord() value of characters in commandline
- commandlineLogScore: log10 of commandlineScore
"""
output_df = input_frame.copy()
# Set any NaN values to empty string
if "NewProcessName" in output_df and "CommandLine" in output_df:
output_df[["NewProcessName", "CommandLine"]] = output_df[
["NewProcessName", "CommandLine"]
].fillna(value="")
# try to determine the path separator
if path_separator is None:
sample_df = output_df.head(10)
lx_path = len(sample_df[sample_df["NewProcessName"].str.contains("/")])
path_separator = "/" if lx_path else "\\"
# Create features from process name and command line
if "NewProcessName" in output_df:
_add_processname_features(output_df, force, path_separator)
if "CommandLine" in output_df:
_add_commandline_features(output_df, force)
if "SubjectLogonId" in output_df and ("isSystemSession" not in output_df or force):
output_df["isSystemSession"] = output_df["SubjectLogonId"].isin(["0x3e7", "-1"])
return output_df
def _add_processname_features(
output_df: pd.DataFrame, force: bool, path_separator: str
):
"""
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
"""
if "processName" not in output_df or force:
output_df["processName"] = output_df.apply(
lambda x: x.NewProcessName.split(path_separator)[-1], axis=1
)
if "pathScore" not in output_df or force:
output_df["pathScore"] = output_df.apply(
lambda x: char_ord_score(x.NewProcessName), axis=1
)
if "pathLogScore" not in output_df or force:
output_df["pathLogScore"] = output_df.apply(
lambda x: log10(x.pathScore) if x.pathScore else 0, axis=1
)
if "pathHash" not in output_df or force:
output_df["pathHash"] = output_df.apply(
lambda x: crc32_hash(x.NewProcessName), axis=1
)
def _add_commandline_features(output_df: pd.DataFrame, force: bool):
"""
Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
"""
if "commandlineLen" not in output_df or force:
output_df["commandlineLen"] = output_df.apply(
lambda x: len(x.CommandLine), axis=1
)
if "commandlineLogLen" not in output_df or force:
output_df["commandlineLogLen"] = output_df.apply(
lambda x: log10(x.commandlineLen) if x.commandlineLen else 0, axis=1
)
if "commandlineTokensFull" not in output_df or force:
output_df["commandlineTokensFull"] = output_df[["CommandLine"]].apply(
lambda x: delim_count(x.CommandLine), axis=1
)
if "commandlineScore" not in output_df or force:
output_df["commandlineScore"] = output_df.apply(
lambda x: char_ord_score(x.CommandLine), axis=1
)
if "commandlineTokensHash" not in output_df or force:
output_df["commandlineTokensHash"] = output_df.apply(
lambda x: delim_hash(x.CommandLine), axis=1
)
@export
@lru_cache(maxsize=1024)
def delim_count(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Count the delimiters in input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Count of delimiters in the string.
"""
return len(re.findall(delim_list, value))
@export
@lru_cache(maxsize=1024)
def delim_hash(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Return a hash (CRC32) of the delimiters from input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Hash of delimiter set in the string.
"""
return crc32(bytes("".join(re.findall(delim_list, value)), "utf-8"))
@export
@lru_cache(maxsize=1024)
def char_ord_score(value: str, scale: int = 1) -> int:
"""
Return sum of ord values of characters in string.
Parameters
----------
value : str
Data to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
int
Sum of the ordinal values of the characters, divided by `scale` and floored.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return floor(sum(ord(x) for x in value) / scale)
@export
@lru_cache(maxsize=1024)
def token_count(value: str, delimiter: str = " ") -> int:
"""
Return count of delimiter-separated tokens in a string.
Parameters
----------
value : str
Data to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
int
count of tokens
"""
return len(value.split(delimiter))
def _string_score(input_str):
"""Sum the ord(c) for characters in a string."""
return sum(ord(x) for x in input_str)
@export
@lru_cache(maxsize=1024)
def crc32_hash(value: str) -> int:
"""
Return the CRC32 hash of the input column.
Parameters
----------
value : str
Data to process
Returns
-------
int
CRC32 hash
"""
return crc32(bytes(value.encode("utf-8")))
@export
def delim_count_df(
data: pd.DataFrame, column: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]'
) -> pd.Series:
r"""
Count the delimiters in input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
The name of the column to process
delim_list : str, optional
delimiters to use. (the default is r\'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]\')
Returns
-------
pd.Series
Count of delimiters in the string in `column`.
"""
return data[column].str.count(delim_list)
@export
def char_ord_score_df(data: pd.DataFrame, column: str, scale: int = 1) -> pd.Series:
"""
Return sum of ord values of characters in string.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
pd.Series
The sum of the ordinal values of the characters
in `column`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return data.apply(lambda x: sum(ord(char) for char in x[column]) / scale, axis=1)
@export
def token_count_df(data: pd.DataFrame, column: str, delimiter: str = " ") -> pd.Series:
"""
Return count of delimiter-separated tokens in a DataFrame column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
pd.Series
count of tokens in strings in `column`
"""
return data.apply(lambda x: len(x[column].split(delimiter)), axis=1)
@export
def crc32_hash_df(data: pd.DataFrame, column: str) -> pd.Series:
"""
Return the CRC32 hash of the input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
Returns
-------
pd.Series
CRC32 hash of input column
"""
return data.apply(lambda x: crc32(bytes(x[column].encode("utf-8"))), axis=1)
# pylint: disable=too-many-arguments, too-many-statements
@export # noqa: C901, MC0001
def plot_cluster(
db_cluster: DBSCAN,
data: pd.DataFrame,
x_predict: np.ndarray,
plot_label: str = None,
plot_features: Tuple[int, int] = (0, 1),
verbose: bool = False,
cut_off: int = 3,
xlabel: str = None,
ylabel: str = None,
):
"""
Plot clustered data as scatter chart.
Parameters
----------
db_cluster : DBSCAN
DBScan Cluster (from SkLearn DBSCAN).
data : pd.DataFrame
Dataframe containing original data.
x_predict : np.ndarray
The DBSCAN predict numpy array
plot_label : str, optional
If set the column to use to label data points
(the default is None)
plot_features : Tuple[int, int], optional
Which two features in x_predict to plot (the default is (0, 1))
verbose : bool, optional
Verbose execution with some extra info
(the default is False)
cut_off : int, optional
The cluster size below which items are considered outliers
(the default is 3)
xlabel : str, optional
x-axis label (the default is None)
ylabel : str, optional
y-axis label (the default is None)
"""
max_idx = x_predict.shape[1] - 1
if plot_features[0] >= x_predict.shape[1]:
raise ValueError(
"plot_features[0] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[1] >= x_predict.shape[1]:
raise ValueError(
"plot_features[1] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[0] == plot_features[1]:
mssg = "plot_features indexes must be 2 different values in range 0 to"
raise ValueError(mssg + f" {max_idx}.")
labels = db_cluster.labels_
core_samples_mask = np.zeros_like(labels, dtype=bool)
# pylint: disable=unsupported-assignment-operation
# (assignment of numpy array is valid)
core_samples_mask[db_cluster.core_sample_indices_] = True
unique_labels = set(labels)
# pylint: disable=no-member
# Spectral color map does exist
colors = [cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
_, counts = np.unique(labels, return_counts=True)
if verbose:
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(x_predict, labels))
if (
not isinstance(data, pd.DataFrame)
or plot_label is not None
and plot_label not in data
):
plot_label = None
p_label = None
for cluster_id, color in zip(unique_labels, colors):
if cluster_id == -1:
# Black used for noise.
color = [0, 0, 0, 1]
class_member_mask = labels == cluster_id
cluster_size = counts[cluster_id]
marker_size = cluster_size
marker = "o"
font_size = "small"
alpha = 0.4
if cluster_size < cut_off:
marker = "+"
marker_size = 10
font_size = "large"
alpha = 1.0
xy_pos = x_predict[class_member_mask & core_samples_mask]
plt.plot(
xy_pos[:, plot_features[0]],
xy_pos[:, plot_features[1]],
marker,
markerfacecolor=tuple(color),
markersize=marker_size,
)
if plot_label:
first_row = data[class_member_mask].iloc[0]
if not first_row.empty and plot_label in first_row:
p_label = first_row[plot_label]
try:
plt.annotate(
p_label,
xy=(xy_pos[0, plot_features[0]], xy_pos[0, plot_features[1]]),
fontsize=font_size,
alpha=alpha,
)
except IndexError:
pass
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
return plt
|
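Taken together, the public functions above are normally driven end to end: derive numeric features, cluster on a few of them, then plot the result. A hedged sketch of that flow (the `procs_df` input and the chosen feature columns are assumptions; any DataFrame with `NewProcessName`, `CommandLine`, `SubjectLogonId` and `TimeCreatedUtc` columns would do):

# Sketch only -- procs_df is an assumed process-creation event DataFrame.
feature_procs = add_process_features(input_frame=procs_df)

clus_events, db_cluster, x_data = dbcluster_events(
    data=feature_procs,
    cluster_columns=["commandlineTokensFull", "pathScore", "isSystemSession"],
    time_column="TimeCreatedUtc",
    max_cluster_distance=0.0001,
    min_cluster_samples=2,
)

plot_cluster(
    db_cluster,
    feature_procs,
    x_data,
    plot_label="processName",
    plot_features=(0, 1),
    verbose=True,
    cut_off=3,
    xlabel="Cmdline Tokens",
    ylabel="Path Score",
)

Here `clus_events` holds one exemplar row per cluster plus every noise row, which is usually the view presented to an analyst.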
_add_commandline_features
|
Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from ..common.exceptions import MsticpyImportExtraError
from ..common.utility import export
from .._version import VERSION
try:
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
except ImportError as imp_err:
raise MsticpyImportExtraError(
"Cannot use this feature without Sklearn and matplotlib installed",
title="Error importing Scikit Learn and matplotlib",
extra="ml",
) from imp_err
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
data: Any,
cluster_columns: List[Any] = None,
verbose: bool = False,
normalize: bool = True,
time_column: str = "TimeCreatedUtc",
max_cluster_distance: float = 0.01,
min_cluster_samples: int = 2,
**kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
"""
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
"""
allowed_types = [np.ndarray, pd.DataFrame]
x_input = None
if isinstance(data, pd.DataFrame):
if cluster_columns is None:
x_input = data.values
else:
x_input = data[cluster_columns].values
elif isinstance(data, np.ndarray):
x_input = data if cluster_columns is None else data[:, cluster_columns].values
if x_input is None:
mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
type_list = ", ".join(str(t) for t in allowed_types)
mssg = mssg.format(str(type(data)), type_list)
raise ValueError(mssg)
# Create DBSCAN cluster object
db_cluster = DBSCAN(
eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
)
# Normalize the data (most clustering algorithms don't do well with
# unnormalized data)
x_norm = Normalizer().fit_transform(x_input) if normalize else x_input
# fit the data set
db_cluster.fit(x_norm)
labels = db_cluster.labels_
cluster_set, counts = np.unique(labels, return_counts=True)
if verbose:
print(
"Clustering for set size ",
len(x_norm),
" - ",
len(cluster_set),
" clusters",
)
print("Individual cluster sizes: ", ", ".join(str(c) for c in counts))
clustered_events = _merge_clustered_items(
cluster_set, labels, data, time_column, counts
)
if verbose:
print("Cluster output rows: ", len(clustered_events))
return clustered_events, db_cluster, x_norm
def _merge_clustered_items(
cluster_set: np.array,
labels: np.array,
data: Union[pd.DataFrame, np.array],
time_column: str,
counts: np.array,
) -> pd.DataFrame:
"""
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
"""
tz_aware = data.iloc[0][time_column].tz
ts_type = "datetime64[ns, UTC]" if tz_aware is not None else "datetime64[ns]"
cluster_list = []
# Iterate through clusters, adding exemplar to output frame
# pylint: disable=consider-using-enumerate
# we need to know the index of the item within the loop
for idx in range(len(cluster_set)):
cluster_id = cluster_set[idx]
class_members = labels == cluster_id
if isinstance(data, pd.DataFrame):
time_ordered = data[class_members].sort_values(time_column, ascending=True)
first_event_time = time_ordered[0:][time_column].iat[0]
last_event_time = time_ordered[-1:][time_column].iat[0]
else:
first_event_time = None
last_event_time = None
if cluster_id == -1:
# 'Noise' events are individual items that could not be assigned
# to a cluster and so are unique
cluster_list.append(
data[class_members]
.assign(
Clustered=False,
ClusterId=cluster_id,
ClusterSize=1,
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
else:
# Otherwise, just choose the first example of the cluster set
cluster_list.append(
data[class_members]
.assign(
Clustered=True,
ClusterId=cluster_id,
ClusterSize=counts[idx],
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)[0:1]
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
# pylint: enable=consider-using-enumerate
return pd.concat(cluster_list)
@export
def add_process_features(
input_frame: pd.DataFrame, path_separator: str = None, force: bool = False
) -> pd.DataFrame:
r"""
Add numerical features based on patterns of command line and process name.
Parameters
----------
input_frame : pd.DataFrame
The input dataframe
path_separator : str, optional
Path separator. If not supplied, try to determine
from 'NewProcessName' column of first 10 rows
(the default is None)
force : bool, optional
Forces re-calculation of feature columns even if they
already exist (the default is False)
Returns
-------
pd.DataFrame
Copy of the dataframe with the additional numeric features
Notes
-----
Features added:
- processNameLen: length of process file name (inc path)
- processNameTokens: the number of elements in the path
- processName: the process file name (minus path)
- commandlineTokens: number of space-separated tokens in the command line
- commandlineLen: length of the command line
- commandlineLogLen: log10 length of commandline
- isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
- commandlineTokensFull: counts number of token separators in commandline
[\\s\-\\/\.,"\'\|&:;%$()]
- pathScore: sum of ord() value of characters in path
- pathLogScore: log10 of pathScore
- commandlineScore: sum of ord() value of characters in commandline
- commandlineLogScore: log10 of commandlineScore
"""
output_df = input_frame.copy()
# Set any NaN values to empty string
if "NewProcessName" in output_df and "CommandLine" in output_df:
output_df[["NewProcessName", "CommandLine"]] = output_df[
["NewProcessName", "CommandLine"]
].fillna(value="")
# try to determine the path separator
if path_separator is None:
sample_df = output_df.head(10)
lx_path = len(sample_df[sample_df["NewProcessName"].str.contains("/")])
path_separator = "/" if lx_path else "\\"
# Create features from process name and command line
if "NewProcessName" in output_df:
_add_processname_features(output_df, force, path_separator)
if "CommandLine" in output_df:
_add_commandline_features(output_df, force)
if "SubjectLogonId" in output_df and ("isSystemSession" not in output_df or force):
output_df["isSystemSession"] = output_df["SubjectLogonId"].isin(["0x3e7", "-1"])
return output_df
def _add_processname_features(
output_df: pd.DataFrame, force: bool, path_separator: str
):
"""
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
"""
if "processName" not in output_df or force:
output_df["processName"] = output_df.apply(
lambda x: x.NewProcessName.split(path_separator)[-1], axis=1
)
if "pathScore" not in output_df or force:
output_df["pathScore"] = output_df.apply(
lambda x: char_ord_score(x.NewProcessName), axis=1
)
if "pathLogScore" not in output_df or force:
output_df["pathLogScore"] = output_df.apply(
lambda x: log10(x.pathScore) if x.pathScore else 0, axis=1
)
if "pathHash" not in output_df or force:
output_df["pathHash"] = output_df.apply(
lambda x: crc32_hash(x.NewProcessName), axis=1
)
# MASKED: _add_commandline_features function (lines 350-382)
@export
@lru_cache(maxsize=1024)
def delim_count(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Count the delimiters in input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Count of delimiters in the string.
"""
return len(re.findall(delim_list, value))
@export
@lru_cache(maxsize=1024)
def delim_hash(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Return a hash (CRC32) of the delimiters from input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Hash of delimiter set in the string.
"""
return crc32(bytes("".join(re.findall(delim_list, value)), "utf-8"))
@export
@lru_cache(maxsize=1024)
def char_ord_score(value: str, scale: int = 1) -> int:
"""
Return sum of ord values of characters in string.
Parameters
----------
value : str
Data to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
int
Sum of the ordinal values of the characters, divided by `scale` and floored.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return floor(sum(ord(x) for x in value) / scale)
@export
@lru_cache(maxsize=1024)
def token_count(value: str, delimiter: str = " ") -> int:
"""
Return count of delimiter-separated tokens in a string.
Parameters
----------
value : str
Data to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
int
count of tokens
"""
return len(value.split(delimiter))
def _string_score(input_str):
"""Sum the ord(c) for characters in a string."""
return sum(ord(x) for x in input_str)
@export
@lru_cache(maxsize=1024)
def crc32_hash(value: str) -> int:
"""
Return the CRC32 hash of the input column.
Parameters
----------
value : str
Data to process
Returns
-------
int
CRC32 hash
"""
return crc32(bytes(value.encode("utf-8")))
@export
def delim_count_df(
data: pd.DataFrame, column: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]'
) -> pd.Series:
r"""
Count the delimiters in input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
The name of the column to process
delim_list : str, optional
delimiters to use. (the default is r\'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]\')
Returns
-------
pd.Series
Count of delimiters in the string in `column`.
"""
return data[column].str.count(delim_list)
@export
def char_ord_score_df(data: pd.DataFrame, column: str, scale: int = 1) -> pd.Series:
"""
Return sum of ord values of characters in string.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
pd.Series
The sum of the ordinal values of the characters
in `column`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return data.apply(lambda x: sum(ord(char) for char in x[column]) / scale, axis=1)
@export
def token_count_df(data: pd.DataFrame, column: str, delimiter: str = " ") -> pd.Series:
"""
Return count of delimiter-separated tokens in a DataFrame column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
pd.Series
count of tokens in strings in `column`
"""
return data.apply(lambda x: len(x[column].split(delimiter)), axis=1)
@export
def crc32_hash_df(data: pd.DataFrame, column: str) -> pd.Series:
"""
Return the CRC32 hash of the input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
Returns
-------
pd.Series
CRC32 hash of input column
"""
return data.apply(lambda x: crc32(bytes(x[column].encode("utf-8"))), axis=1)
# pylint: disable=too-many-arguments, too-many-statements
@export # noqa: C901, MC0001
def plot_cluster(
db_cluster: DBSCAN,
data: pd.DataFrame,
x_predict: np.ndarray,
plot_label: str = None,
plot_features: Tuple[int, int] = (0, 1),
verbose: bool = False,
cut_off: int = 3,
xlabel: str = None,
ylabel: str = None,
):
"""
Plot clustered data as scatter chart.
Parameters
----------
db_cluster : DBSCAN
DBScan Cluster (from SkLearn DBSCAN).
data : pd.DataFrame
Dataframe containing original data.
x_predict : np.ndarray
The DBSCAN predict numpy array
plot_label : str, optional
If set the column to use to label data points
(the default is None)
plot_features : Tuple[int, int], optional
Which two features in x_predict to plot (the default is (0, 1))
verbose : bool, optional
Verbose execution with some extra info
(the default is False)
cut_off : int, optional
The cluster size below which items are considered outliers
(the default is 3)
xlabel : str, optional
x-axis label (the default is None)
ylabel : str, optional
y-axis label (the default is None)
"""
max_idx = x_predict.shape[1] - 1
if plot_features[0] >= x_predict.shape[1]:
raise ValueError(
"plot_features[0] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[1] >= x_predict.shape[1]:
raise ValueError(
"plot_features[1] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[0] == plot_features[1]:
mssg = "plot_features indexes must be 2 different values in range 0 to"
raise ValueError(mssg + f" {max_idx}.")
labels = db_cluster.labels_
core_samples_mask = np.zeros_like(labels, dtype=bool)
# pylint: disable=unsupported-assignment-operation
# (assignment of numpy array is valid)
core_samples_mask[db_cluster.core_sample_indices_] = True
unique_labels = set(labels)
# pylint: disable=no-member
# Spectral color map does exist
colors = [cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
_, counts = np.unique(labels, return_counts=True)
if verbose:
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(x_predict, labels))
if (
not isinstance(data, pd.DataFrame)
or plot_label is not None
and plot_label not in data
):
plot_label = None
p_label = None
for cluster_id, color in zip(unique_labels, colors):
if cluster_id == -1:
# Black used for noise.
color = [0, 0, 0, 1]
class_member_mask = labels == cluster_id
cluster_size = counts[cluster_id]
marker_size = cluster_size
marker = "o"
font_size = "small"
alpha = 0.4
if cluster_size < cut_off:
marker = "+"
marker_size = 10
font_size = "large"
alpha = 1.0
xy_pos = x_predict[class_member_mask & core_samples_mask]
plt.plot(
xy_pos[:, plot_features[0]],
xy_pos[:, plot_features[1]],
marker,
markerfacecolor=tuple(color),
markersize=marker_size,
)
if plot_label:
first_row = data[class_member_mask].iloc[0]
if not first_row.empty and plot_label in first_row:
p_label = first_row[plot_label]
try:
plt.annotate(
p_label,
xy=(xy_pos[0, plot_features[0]], xy_pos[0, plot_features[1]]),
fontsize=font_size,
alpha=alpha,
)
except IndexError:
pass
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
return plt
|
def _add_commandline_features(output_df: pd.DataFrame, force: bool):
"""
Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
"""
if "commandlineLen" not in output_df or force:
output_df["commandlineLen"] = output_df.apply(
lambda x: len(x.CommandLine), axis=1
)
if "commandlineLogLen" not in output_df or force:
output_df["commandlineLogLen"] = output_df.apply(
lambda x: log10(x.commandlineLen) if x.commandlineLen else 0, axis=1
)
if "commandlineTokensFull" not in output_df or force:
output_df["commandlineTokensFull"] = output_df[["CommandLine"]].apply(
lambda x: delim_count(x.CommandLine), axis=1
)
if "commandlineScore" not in output_df or force:
output_df["commandlineScore"] = output_df.apply(
lambda x: char_ord_score(x.CommandLine), axis=1
)
if "commandlineTokensHash" not in output_df or force:
output_df["commandlineTokensHash"] = output_df.apply(
lambda x: delim_hash(x.CommandLine), axis=1
)
| 350 | 382 |
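For intuition, these are the values the command-line features take for one invented command line, computed with the module's own public helpers (the snippet assumes it runs in the same namespace as the module above, so `delim_count`, `char_ord_score` and `delim_hash` are available):

from math import log10

cmd = "install-update -hostname myhost.contoso.com -tmp:/tmp/4957bf64/rollback"

print(len(cmd))             # commandlineLen
print(log10(len(cmd)))      # commandlineLogLen
print(delim_count(cmd))     # commandlineTokensFull
print(char_ord_score(cmd))  # commandlineScore
print(delim_hash(cmd))      # commandlineTokensHash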
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from ..common.exceptions import MsticpyImportExtraError
from ..common.utility import export
from .._version import VERSION
try:
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
except ImportError as imp_err:
raise MsticpyImportExtraError(
"Cannot use this feature without Sklearn and matplotlib installed",
title="Error importing Scikit Learn and matplotlib",
extra="ml",
) from imp_err
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
data: Any,
cluster_columns: List[Any] = None,
verbose: bool = False,
normalize: bool = True,
time_column: str = "TimeCreatedUtc",
max_cluster_distance: float = 0.01,
min_cluster_samples: int = 2,
**kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
"""
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
"""
allowed_types = [np.ndarray, pd.DataFrame]
x_input = None
if isinstance(data, pd.DataFrame):
if cluster_columns is None:
x_input = data.values
else:
x_input = data[cluster_columns].values
elif isinstance(data, np.ndarray):
x_input = data if cluster_columns is None else data[:, cluster_columns].values
if x_input is None:
mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
type_list = ", ".join(str(t) for t in allowed_types)
mssg = mssg.format(str(type(data)), type_list)
raise ValueError(mssg)
# Create DBSCAN cluster object
db_cluster = DBSCAN(
eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
)
# Normalize the data (most clustering algorithms don't do well with
# unnormalized data)
x_norm = Normalizer().fit_transform(x_input) if normalize else x_input
# fit the data set
db_cluster.fit(x_norm)
labels = db_cluster.labels_
cluster_set, counts = np.unique(labels, return_counts=True)
if verbose:
print(
"Clustering for set size ",
len(x_norm),
" - ",
len(cluster_set),
" clusters",
)
print("Individual cluster sizes: ", ", ".join(str(c) for c in counts))
clustered_events = _merge_clustered_items(
cluster_set, labels, data, time_column, counts
)
if verbose:
print("Cluster output rows: ", len(clustered_events))
return clustered_events, db_cluster, x_norm
def _merge_clustered_items(
cluster_set: np.array,
labels: np.array,
data: Union[pd.DataFrame, np.array],
time_column: str,
counts: np.array,
) -> pd.DataFrame:
"""
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
"""
tz_aware = data.iloc[0][time_column].tz
ts_type = "datetime64[ns, UTC]" if tz_aware is not None else "datetime64[ns]"
cluster_list = []
# Iterate through clusters, adding exemplar to output frame
# pylint: disable=consider-using-enumerate
# we need to know the index of the item within the loop
for idx in range(len(cluster_set)):
cluster_id = cluster_set[idx]
class_members = labels == cluster_id
if isinstance(data, pd.DataFrame):
time_ordered = data[class_members].sort_values(time_column, ascending=True)
first_event_time = time_ordered[0:][time_column].iat[0]
last_event_time = time_ordered[-1:][time_column].iat[0]
else:
first_event_time = None
last_event_time = None
if cluster_id == -1:
# 'Noise' events are individual items that could not be assigned
# to a cluster and so are unique
cluster_list.append(
data[class_members]
.assign(
Clustered=False,
ClusterId=cluster_id,
ClusterSize=1,
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
else:
# Otherwise, just choose the first example of the cluster set
cluster_list.append(
data[class_members]
.assign(
Clustered=True,
ClusterId=cluster_id,
ClusterSize=counts[idx],
TimeGenerated=first_event_time,
FirstEventTime=first_event_time,
LastEventTime=last_event_time,
)[0:1]
.astype(
dtype={
"TimeGenerated": ts_type,
"FirstEventTime": ts_type,
"LastEventTime": ts_type,
}
)
)
# pylint: enable=consider-using-enumerate
return pd.concat(cluster_list)
@export
def add_process_features(
input_frame: pd.DataFrame, path_separator: str = None, force: bool = False
) -> pd.DataFrame:
r"""
Add numerical features based on patterns of command line and process name.
Parameters
----------
input_frame : pd.DataFrame
The input dataframe
path_separator : str, optional
Path separator. If not supplied, try to determine
from 'NewProcessName' column of first 10 rows
(the default is None)
force : bool, optional
Forces re-calculation of feature columns even if they
already exist (the default is False)
Returns
-------
pd.DataFrame
Copy of the dataframe with the additional numeric features
Notes
-----
Features added:
- processNameLen: length of process file name (inc path)
- processNameTokens: the number of elements in the path
- processName: the process file name (minus path)
- commandlineTokens: number of space-separated tokens in the command line
- commandlineLen: length of the command line
- commandlineLogLen: log10 length of commandline
- isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
- commandlineTokensFull: counts number of token separators in commandline
[\\s\-\\/\.,"\'\|&:;%$()]
- pathScore: sum of ord() value of characters in path
- pathLogScore: log10 of pathScore
- commandlineScore: sum of ord() value of characters in commandline
- commandlineLogScore: log10 of commandlineScore
"""
output_df = input_frame.copy()
# Set any NaN values to empty string
if "NewProcessName" in output_df and "CommandLine" in output_df:
output_df[["NewProcessName", "CommandLine"]] = output_df[
["NewProcessName", "CommandLine"]
].fillna(value="")
# try to determine the path separator
if path_separator is None:
sample_df = output_df.head(10)
lx_path = len(sample_df[sample_df["NewProcessName"].str.contains("/")])
path_separator = "/" if lx_path else "\\"
# Create features from process name and command line
if "NewProcessName" in output_df:
_add_processname_features(output_df, force, path_separator)
if "CommandLine" in output_df:
_add_commandline_features(output_df, force)
if "SubjectLogonId" in output_df and ("isSystemSession" not in output_df or force):
output_df["isSystemSession"] = output_df["SubjectLogonId"].isin(["0x3e7", "-1"])
return output_df
def _add_processname_features(
output_df: pd.DataFrame, force: bool, path_separator: str
):
"""
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
"""
if "processName" not in output_df or force:
output_df["processName"] = output_df.apply(
lambda x: x.NewProcessName.split(path_separator)[-1], axis=1
)
if "pathScore" not in output_df or force:
output_df["pathScore"] = output_df.apply(
lambda x: char_ord_score(x.NewProcessName), axis=1
)
if "pathLogScore" not in output_df or force:
output_df["pathLogScore"] = output_df.apply(
lambda x: log10(x.pathScore) if x.pathScore else 0, axis=1
)
if "pathHash" not in output_df or force:
output_df["pathHash"] = output_df.apply(
lambda x: crc32_hash(x.NewProcessName), axis=1
)
def _add_commandline_features(output_df: pd.DataFrame, force: bool):
"""
Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
"""
if "commandlineLen" not in output_df or force:
output_df["commandlineLen"] = output_df.apply(
lambda x: len(x.CommandLine), axis=1
)
if "commandlineLogLen" not in output_df or force:
output_df["commandlineLogLen"] = output_df.apply(
lambda x: log10(x.commandlineLen) if x.commandlineLen else 0, axis=1
)
if "commandlineTokensFull" not in output_df or force:
output_df["commandlineTokensFull"] = output_df[["CommandLine"]].apply(
lambda x: delim_count(x.CommandLine), axis=1
)
if "commandlineScore" not in output_df or force:
output_df["commandlineScore"] = output_df.apply(
lambda x: char_ord_score(x.CommandLine), axis=1
)
if "commandlineTokensHash" not in output_df or force:
output_df["commandlineTokensHash"] = output_df.apply(
lambda x: delim_hash(x.CommandLine), axis=1
)
@export
@lru_cache(maxsize=1024)
def delim_count(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Count the delimiters in input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Count of delimiters in the string.
"""
return len(re.findall(delim_list, value))
@export
@lru_cache(maxsize=1024)
def delim_hash(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
r"""
Return a hash (CRC32) of the delimiters from input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Hash of delimiter set in the string.
"""
return crc32(bytes("".join(re.findall(delim_list, value)), "utf-8"))
@export
@lru_cache(maxsize=1024)
def char_ord_score(value: str, scale: int = 1) -> int:
"""
Return sum of ord values of characters in string.
Parameters
----------
value : str
Data to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
int
Sum of the ordinal values of the characters, divided by `scale` and floored.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return floor(sum(ord(x) for x in value) / scale)
@export
@lru_cache(maxsize=1024)
def token_count(value: str, delimiter: str = " ") -> int:
"""
Return count of delimiter-separated tokens in a string.
Parameters
----------
value : str
Data to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
int
count of tokens
"""
return len(value.split(delimiter))
def _string_score(input_str):
"""Sum the ord(c) for characters in a string."""
return sum(ord(x) for x in input_str)
@export
@lru_cache(maxsize=1024)
def crc32_hash(value: str) -> int:
"""
Return the CRC32 hash of the input column.
Parameters
----------
value : str
Data to process
Returns
-------
int
CRC32 hash
"""
return crc32(bytes(value.encode("utf-8")))
@export
def delim_count_df(
data: pd.DataFrame, column: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]'
) -> pd.Series:
r"""
Count the delimiters in input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
The name of the column to process
delim_list : str, optional
delimiters to use. (the default is r\'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]\')
Returns
-------
pd.Series
Count of delimiters in the string in `column`.
"""
return data[column].str.count(delim_list)
@export
def char_ord_score_df(data: pd.DataFrame, column: str, scale: int = 1) -> pd.Series:
"""
Return sum of ord values of characters in string.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations of this feature on the
clustering algorithm) (the default is 1)
Returns
-------
pd.Series
The sum of the ordinal values of the characters
in `column`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
"""
return data.apply(lambda x: sum(ord(char) for char in x[column]) / scale, axis=1)
@export
def token_count_df(data: pd.DataFrame, column: str, delimiter: str = " ") -> pd.Series:
"""
Return count of delimiter-separated tokens in a DataFrame column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
pd.Series
count of tokens in strings in `column`
"""
return data.apply(lambda x: len(x[column].split(delimiter)), axis=1)
@export
def crc32_hash_df(data: pd.DataFrame, column: str) -> pd.Series:
"""
Return the CRC32 hash of the input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
Returns
-------
pd.Series
CRC32 hash of input column
"""
return data.apply(lambda x: crc32(bytes(x[column].encode("utf-8"))), axis=1)
# pylint: disable=too-many-arguments, too-many-statements
@export # noqa: C901, MC0001
def plot_cluster(
db_cluster: DBSCAN,
data: pd.DataFrame,
x_predict: np.ndarray,
plot_label: str = None,
plot_features: Tuple[int, int] = (0, 1),
verbose: bool = False,
cut_off: int = 3,
xlabel: str = None,
ylabel: str = None,
):
"""
Plot clustered data as scatter chart.
Parameters
----------
db_cluster : DBSCAN
DBScan Cluster (from SkLearn DBSCAN).
data : pd.DataFrame
Dataframe containing original data.
x_predict : np.ndarray
The DBSCAN predict numpy array
plot_label : str, optional
If set the column to use to label data points
(the default is None)
plot_features : Tuple[int, int], optional
Which two features in x_predict to plot (the default is (0, 1))
verbose : bool, optional
Verbose execution with some extra info
(the default is False)
cut_off : int, optional
The cluster size below which items are considered outliers
(the default is 3)
xlabel : str, optional
x-axis label (the default is None)
ylabel : str, optional
y-axis label (the default is None)
"""
max_idx = x_predict.shape[1] - 1
if plot_features[0] >= x_predict.shape[1]:
raise ValueError(
"plot_features[0] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[1] >= x_predict.shape[1]:
raise ValueError(
"plot_features[1] index must be a value from 0 to {}.".format(max_idx)
)
if plot_features[0] == plot_features[1]:
mssg = "plot_features indexes must be 2 different values in range 0 to"
raise ValueError(mssg + f" {max_idx}.")
labels = db_cluster.labels_
core_samples_mask = np.zeros_like(labels, dtype=bool)
# pylint: disable=unsupported-assignment-operation
# (assignment of numpy array is valid)
core_samples_mask[db_cluster.core_sample_indices_] = True
unique_labels = set(labels)
# pylint: disable=no-member
# Spectral color map does exist
colors = [cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
_, counts = np.unique(labels, return_counts=True)
if verbose:
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(x_predict, labels))
if (
not isinstance(data, pd.DataFrame)
or plot_label is not None
and plot_label not in data
):
plot_label = None
p_label = None
for cluster_id, color in zip(unique_labels, colors):
if cluster_id == -1:
# Black used for noise.
color = [0, 0, 0, 1]
class_member_mask = labels == cluster_id
cluster_size = counts[cluster_id]
marker_size = cluster_size
marker = "o"
font_size = "small"
alpha = 0.4
if cluster_size < cut_off:
marker = "+"
marker_size = 10
font_size = "large"
alpha = 1.0
xy_pos = x_predict[class_member_mask & core_samples_mask]
plt.plot(
xy_pos[:, plot_features[0]],
xy_pos[:, plot_features[1]],
marker,
markerfacecolor=tuple(color),
markersize=marker_size,
)
if plot_label:
first_row = data[class_member_mask].iloc[0]
if not first_row.empty and plot_label in first_row:
p_label = first_row[plot_label]
try:
plt.annotate(
p_label,
xy=(xy_pos[0, plot_features[0]], xy_pos[0, plot_features[1]]),
fontsize=font_size,
alpha=alpha,
)
except IndexError:
pass
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
return plt
|
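The module docstring's central claim is that command lines which differ only in variable values collapse onto the same pattern once only delimiter structure is considered. A small hedged check of that claim, using the helpers defined above on two invented command lines:

# Two invented command lines differing only in host name and temp-path value.
cmd_a = "install-update -hostname host1.contoso.com -tmp:/tmp/4957bf64/rollback"
cmd_b = "install-update -hostname host2.fabrikam.com -tmp:/tmp/ab03cc11/rollback"

# Identical delimiter structure -> identical token count and delimiter hash,
# so on these features DBSCAN treats the two rows as the same pattern.
assert delim_count(cmd_a) == delim_count(cmd_b)
assert delim_hash(cmd_a) == delim_hash(cmd_b)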
user_to_dict
|
Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
|
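Given that description, a plausible sketch of such a method on the auth store follows (an illustration under stated assumptions, not necessarily the library's exact implementation; reading the numeric id from `user.key.id()` assumes the default datastore-backed user model):

def user_to_dict(self, user):
    # Editorial sketch only.
    if not user:
        return None

    # Copy only the attributes named in the 'user_attributes' config.
    user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
    # How the id is obtained depends on the user model; for the default
    # appengine User model it would be the datastore key id (assumption).
    user_dict['user_id'] = user.key.id()
    return user_dict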
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
# MASKED: user_to_dict function (lines 133-149)
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
|
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
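A tiny illustrative sketch of what user_to_dict produces, using stand-in objects (FakeKey/FakeUser and the attribute names are assumptions for illustration only, not part of webapp2):
# Illustration only: stand-ins for an App Engine user entity and its key.
class FakeKey(object):
    def id(self):
        return 42

class FakeUser(object):
    key = FakeKey()
    name = 'alice'
    email = 'alice@example.com'

# With config {'user_attributes': ['name', 'email']}, the method builds:
user_attributes = ['name', 'email']
user = FakeUser()
user_dict = dict((a, getattr(user, a)) for a in user_attributes)
user_dict['user_id'] = user.key.id()
assert user_dict == {'name': 'alice', 'email': 'alice@example.com', 'user_id': 42}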
| 133 | 149 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
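For orientation, a sketch of how the module above is typically driven from a request handler (the handler, route and secret key are assumptions; the auth calls and config keys are the ones defined above):
# Sketch only: handler name, route and secret key are assumptions.
import webapp2
from webapp2_extras import auth

class LoginHandler(webapp2.RequestHandler):
    def post(self):
        authenticator = auth.get_auth(request=self.request)
        try:
            # Validates the credentials and, on success, stores the user
            # dict in the auth session (a new auth token is created).
            user = authenticator.get_user_by_password(
                self.request.POST.get('auth_id'),
                self.request.POST.get('password'),
                remember=True)
            self.response.write('Logged in as user %s' % user['user_id'])
        except (auth.InvalidAuthIdError, auth.InvalidPasswordError):
            self.response.set_status(401)

config = {'webapp2_extras.sessions': {'secret_key': 'change-me'}}
app = webapp2.WSGIApplication([('/login', LoginHandler)], config=config)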
|
default_token_validator
|
Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
# MASKED: default_token_validator function (lines 284-321)
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
|
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
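A short worked example of the age thresholds applied above, using the documented defaults (the timestamps are arbitrary and only illustrate the arithmetic):
# Worked example of the age checks in default_token_validator.
token_max_age = 86400 * 7 * 3   # default: 3 weeks
token_new_age = 86400           # default: 1 day

now = 1600000000                # arbitrary "current" unix timestamp
token_ts = now - 2 * 86400      # token stored 2 days ago

delete = (now - token_ts) > token_max_age   # False: 2 days < 3 weeks
create = (now - token_ts) > token_new_age   # True:  2 days > 1 day

# Outcome: the stored token is past token_new_age, so the validator deletes
# it from the datastore and returns (user, None); the caller then issues a
# fresh token when it saves the session.
print(delete, create)   # False True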
| 284 | 321 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
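# Illustrative sketch (not part of the original module): registering a custom
# password validator. It only demonstrates the ``(store, auth_id, password)``
# signature expected by ``AuthStore.set_password_validator``; the helper name
# below is hypothetical.
def _example_register_password_validator(store):
    def validate(store, auth_id, password):
        # Same as the default behaviour, but never raise: invalid credentials
        # simply yield an anonymous (None) user.
        return store.get_user_by_auth_password(auth_id, password, silent=True)
    store.set_password_validator(validate)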
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, the session is set to be persisted.
:param session_args:
Extra keyword arguments for the session.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
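# Minimal usage sketch (illustrative only; the handler class, form field
# names and redirect target are assumptions, not part of this module). It
# shows the intended call order for a form-based login.
class _ExampleLoginHandler(webapp2.RequestHandler):
    def post(self):
        authenticator = get_auth(request=self.request)
        try:
            user = authenticator.get_user_by_password(
                self.request.POST.get('auth_id'),
                self.request.POST.get('password'),
                remember=bool(self.request.POST.get('remember')))
        except (InvalidAuthIdError, InvalidPasswordError):
            self.abort(401)
        if user:
            self.redirect('/')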
|
get_user_by_session
|
Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
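# Illustrative configuration sketch (not part of the original module): the
# dotted user model path and the session secret are placeholder assumptions;
# only the key names mirror ``default_config`` above.
def _example_app_config():
    return {
        'webapp2_extras.auth': {
            'user_model': 'myapp.models.User',
            'token_max_age': 86400 * 7,
        },
        'webapp2_extras.sessions': {'secret_key': 'change-this-secret'},
    }
# Typically passed to the application as
# ``webapp2.WSGIApplication(routes, config=_example_app_config())``.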
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
# MASKED: get_user_by_session function (lines 351-370)
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, the session is set to be persisted.
:param session_args:
Extra keyword arguments for the session.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
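# Illustrative sketch (not part of the original module): reading the current
# user and logging out inside a handler. The handler class and redirect
# target are assumptions made for the example.
class _ExampleLogoutHandler(webapp2.RequestHandler):
    def get(self):
        authenticator = get_auth(request=self.request)
        if authenticator.get_user_by_session() is not None:
            # Drops the session data and invalidates the stored auth token.
            authenticator.unset_session()
        self.redirect('/')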
|
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
| 351 | 370 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
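# Illustrative sketch (not part of the original module): a custom token
# validator using the ``(store, user_id, token, token_ts)`` signature expected
# by ``AuthStore.set_token_validator``. The one-hour cut-off is an arbitrary
# example value.
def _example_register_token_validator(store):
    def validate(store, user_id, token, token_ts=None):
        # Reuse the default validation, then apply a stricter age limit.
        user, token = store.default_token_validator(user_id, token,
                                                    token_ts=token_ts)
        if user and token_ts and (int(time.time()) - token_ts) > 3600:
            return None, None
        return user, token
    store.set_token_validator(validate)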
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, the session is set to be persisted.
:param session_args:
Extra keyword arguments for the session.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
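# Illustrative sketch (not part of the original module): registering a store
# with an explicit config override at application start-up, so later
# ``get_store()`` calls return it. The 600-second cache age is an arbitrary
# example value.
def _example_install_custom_store(app):
    store = AuthStore(app, config={'token_cache_age': 600})
    set_store(store, app=app)
    return store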
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
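# Illustrative sketch (not part of the original module): a small decorator for
# handler methods that redirects anonymous visitors. The login URL is a
# placeholder assumption.
def _example_user_required(handler_method):
    def wrapper(self, *args, **kwargs):
        user = get_auth(request=self.request).get_user_by_session()
        if user is None:
            return self.redirect('/login')
        return handler_method(self, *args, **kwargs)
    return wrapper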
|
get_user_by_password
|
Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
# MASKED: get_user_by_password function (lines 430-461)
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the auth data from the session after reading it.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
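# --- Illustrative sketch, not part of this module: reading the current user
# --- inside a webapp2 request handler.  `ProfileHandler` is a hypothetical name.
class ProfileHandler(webapp2.RequestHandler):
    def get(self):
        auth = get_auth(request=self.request)
        user = auth.get_user_by_session()
        if user is None:
            # No valid auth session or token: treat as anonymous.
            self.redirect('/login')
        else:
            self.response.write('Signed in as user %s' % user['user_id'])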
|
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
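# --- Illustrative sketch, not part of this module: a login attempt built on the
# --- method above.  The auth_id format depends on the configured user model.
def try_login(auth, auth_id, password, remember=False):
    try:
        # On success a new token is created and saved in the auth session.
        return auth.get_user_by_password(auth_id, password, remember=remember)
    except (InvalidAuthIdError, InvalidPasswordError):
        # Invalid credentials; passing silent=True would return None instead.
        return None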
| 430 | 461 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#:     The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
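# --- Illustrative sketch, not part of this module: overriding these defaults when
# --- building the application.  The auth config key is this module's import path;
# --- 'myapp.models.User' and the secret key are placeholders.
# app = webapp2.WSGIApplication(routes, config={
#     'webapp2_extras.auth': {
#         'user_model': 'myapp.models.User',
#         'token_max_age': 86400 * 7,           # one week instead of three
#     },
#     'webapp2_extras.sessions': {
#         'secret_key': 'change-this-secret',   # needed by the securecookie backend
#     },
# })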
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the auth data from the session after reading it.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
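# --- Illustrative sketch, not part of this module: logging a user out from a
# --- handler.  `LogoutHandler` is a hypothetical name.
class LogoutHandler(webapp2.RequestHandler):
    def get(self):
        auth = get_auth(request=self.request)
        # Drops the auth data from the session and invalidates the stored token.
        auth.unset_session()
        self.redirect('/')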
|
set_session
|
Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#:     The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
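# --- Illustrative sketch, not part of this module: installing a stricter token
# --- validator.  The one-hour cutoff is an arbitrary example policy.
def strict_token_validator(store, user_id, token, token_ts=None):
    # Reuse the default behaviour, then reject tokens older than one hour.
    user, token = store.default_token_validator(user_id, token, token_ts=token_ts)
    if user and token_ts and (int(time.time()) - token_ts) > 3600:
        store.delete_auth_token(user_id, token)
        return None, None
    return user, token
# Wiring it up (e.g. at application setup time):
#     get_store(app=app).set_token_validator(strict_token_validator)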
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
# MASKED: set_session function (lines 470-508)
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the auth data from the session after reading it.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
|
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
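# --- Illustrative sketch, not part of this module: what the method above ends up
# --- persisting.  Values are made up; the list order follows session_attributes
# --- ('user_id', 'remember', 'token', 'token_ts', 'cache_ts') when no extra
# --- user_attributes are configured.
# user = {'user_id': 42, 'remember': 1, 'token': 'abc123',
#         'token_ts': 1300000000, 'cache_ts': 1300000000}
# store.serialize_session(user)
# # -> [42, 1, 'abc123', 1300000000, 1300000000]
# store.deserialize_session([42, 1, 'abc123', 1300000000, 1300000000]) == user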
| 470 | 508 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#:     The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
User id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising an exception when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the auth data from the session after reading it.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
|
get_session_data
|
Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
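# A minimal sketch (assumed usage, not part of this module) of overriding these
# defaults when constructing the application; the values are read from the
# ``webapp2_extras.auth`` config key (this module's name):
#
#     import webapp2
#
#     app = webapp2.WSGIApplication(routes, config={   # ``routes`` is assumed
#         'webapp2_extras.auth': {
#             'user_model': 'myapp.models.User',       # hypothetical model path
#             'token_max_age': 86400 * 14,
#             'user_attributes': ['email'],
#         },
#     })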
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
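# The comprehension above is an order-preserving de-duplication: ``seen.add(a)``
# returns None (falsy), so ``not seen.add(a)`` is always true and only records
# the element. A small illustration with assumed values:
#
#     attrs = ['user_id', 'remember', 'token', 'token_ts', 'cache_ts', 'remember']
#     seen = set()
#     [a for a in attrs if a not in seen and not seen.add(a)]
#     # -> ['user_id', 'remember', 'token', 'token_ts', 'cache_ts']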
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
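# Round-trip sketch (assuming the default ``session_attributes`` order):
#
#     attrs = ['user_id', 'remember', 'token', 'token_ts', 'cache_ts']
#     data = {'user_id': 1, 'remember': 1, 'token': 'abc',
#             'token_ts': 1300000000, 'cache_ts': 1300000000}
#     as_list = [data.get(k) for k in attrs]   # what serialize_session() returns
#     # -> [1, 1, 'abc', 1300000000, 1300000000]
#     dict(zip(attrs, as_list)) == data        # deserialize_session() restores it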
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
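# A hedged example of plugging in a custom validator (the names below are
# hypothetical). Because the function is bound to the store, it receives the
# store instance as its first argument:
#
#     def ldap_password_validator(store, auth_id, password, silent=False):
#         # ...check credentials against an external service...
#         return {'user_id': 123}   # or None on failure
#
#     store = get_store(app=app)
#     store.set_password_validator(ldap_password_validator)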
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
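# Illustrative login flow (assumed handler code, not part of this module):
#
#     auth = get_auth()
#     try:
#         user = auth.get_user_by_password(auth_id, password, remember=True)
#         # ``user`` is a dict with 'user_id', the token metadata and any
#         # configured user_attributes; the auth session is already set here.
#     except (InvalidAuthIdError, InvalidPasswordError):
#         pass  # bad credentials; nothing was stored in the session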
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, the session is set to be persisted.
:param session_args:
Extra keyword arguments for the session (e.g. ``max_age``).
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
# MASKED: get_session_data function (lines 518-529)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
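# Typical per-request usage sketch (assumed handler code): the registry keeps a
# single Auth instance per request, so repeated calls are cheap:
#
#     auth = get_auth()                  # same instance on every call
#     user = auth.get_user_by_session()  # None for anonymous requests
#     if user is None:
#         pass  # e.g. redirect to a login page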
|
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
| 518 | 529 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#: The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
'user_model': 'webapp2_extras.appengine.auth.models.User',
'session_backend': 'securecookie',
'cookie_name': 'auth',
'token_max_age': 86400 * 7 * 3,
'token_new_age': 86400,
'token_cache_age': 3600,
'user_attributes': [],
}
#: Internal flag for anonymous users.
_anon = object()
class AuthError(Exception):
"""Base auth exception."""
class InvalidAuthIdError(AuthError):
"""Raised when a user can't be fetched given an auth_id."""
class InvalidPasswordError(AuthError):
"""Raised when a user password doesn't match."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance of the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
"""Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
"""Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
"""
now = int(time.time())
delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
create = False
if not delete:
# Try to fetch the user.
user, ts = self.get_user_by_auth_token(user_id, token)
if user:
# Now validate the real timestamp.
delete = (now - ts) > self.config['token_max_age']
create = (now - ts) > self.config['token_new_age']
if delete or create or not user:
if delete or create:
# Delete token from db.
self.delete_auth_token(user_id, token)
if delete:
user = None
token = None
return user, token
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
"""Authentication provider for a single request."""
#: A :class:`webapp2.Request` instance.
request = None
#: An :class:`AuthStore` instance.
store = None
#: Cached user for the request.
_user = None
def __init__(self, request):
"""Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
"""
self.request = request
self.store = get_store(app=request.app)
# Retrieving a user -------------------------------------------------------
def _user_or_none(self):
return self._user if self._user is not _anon else None
def get_user_by_session(self, save_session=True):
"""Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is None:
data = self.get_session_data(pop=True)
if not data:
self._user = _anon
else:
self._user = self.get_user_by_token(
user_id=data['user_id'], token=data['token'],
token_ts=data['token_ts'], cache=data,
cache_ts=data['cache_ts'], remember=data['remember'],
save_session=save_session)
return self._user_or_none()
def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
cache_ts=None, remember=False, save_session=True):
"""Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
"""
if self._user is not None:
assert (self._user is not _anon and
self._user['user_id'] == user_id and
self._user['token'] == token)
return self._user_or_none()
if cache and cache_ts:
# Check if we can use the cached info.
now = int(time.time())
valid = (now - cache_ts) < self.store.config['token_cache_age']
if valid and token_ts:
valid2 = (now - token_ts) < self.store.config['token_max_age']
valid3 = (now - token_ts) < self.store.config['token_new_age']
valid = valid2 and valid3
if valid:
self._user = cache
else:
cache_ts = None
if self._user is None:
# Fetch and validate the token.
self._user, token = self.store.validate_token(user_id, token,
token_ts=token_ts)
if self._user is None:
self._user = _anon
elif save_session:
if not token:
token_ts = None
self.set_session(self._user, token=token, token_ts=token_ts,
cache_ts=cache_ts, remember=remember)
return self._user_or_none()
def get_user_by_password(self, auth_id, password, remember=False,
save_session=True, silent=False):
"""Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, returns None instead of raising when auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
if save_session:
# During a login attempt, invalidate current session.
self.unset_session()
self._user = self.store.validate_password(auth_id, password,
silent=silent)
if not self._user:
self._user = _anon
elif save_session:
# This always creates a new token with new timestamp.
self.set_session(self._user, remember=remember)
return self._user_or_none()
# Storing and removing user from session ----------------------------------
@webapp2.cached_property
def session(self):
"""Auth session."""
return self.store.get_session(self.request)
def set_session(self, user, token=None, token_ts=None, cache_ts=None,
remember=False, **session_args):
"""Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:param remember:
If True, the session is set to be persisted.
:param session_args:
Extra keyword arguments for the session (e.g. ``max_age``).
"""
now = int(time.time())
token = token or self.store.create_auth_token(user['user_id'])
token_ts = token_ts or now
cache_ts = cache_ts or now
if remember:
max_age = self.store.config['token_max_age']
else:
max_age = None
session_args.setdefault('max_age', max_age)
# Create a new dict or just update user?
# We are doing the latter, and so the user dict will always have
# the session metadata (token, timestamps etc). This is easier to test.
# But we could store only user_id and custom user attributes instead.
user.update({
'token': token,
'token_ts': token_ts,
'cache_ts': cache_ts,
'remember': int(remember),
})
self.set_session_data(user, **session_args)
self._user = user
def unset_session(self):
"""Removes a user from the session and invalidates the auth token."""
self._user = None
data = self.get_session_data(pop=True)
if data:
# Invalidate current token.
self.store.delete_auth_token(data['user_id'], data['token'])
def get_session_data(self, pop=False):
"""Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
"""
func = self.session.pop if pop else self.session.get
rv = func('_user', None)
if rv:
return self.store.deserialize_session(rv)
def set_session_data(self, data, **session_args):
"""Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
"""
self.session['_user'] = self.store.serialize_session(data)
self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------
#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
"""Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
"""
app = app or webapp2.get_app()
store = app.registry.get(key)
if not store:
store = app.registry[key] = factory(app)
return store
def set_store(store, key=_store_registry_key, app=None):
"""Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
"""
app = app or webapp2.get_app()
app.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
"""Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
auth = request.registry.get(key)
if not auth:
auth = request.registry[key] = factory(request)
return auth
def set_auth(auth, key=_auth_registry_key, request=None):
"""Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
"""
request = request or webapp2.get_request()
request.registry[key] = auth
|
stage
|
Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a randomly chosen DUT to run the test.
Returns:
str: The ACL stage to be tested.
|
import os
import time
import random
import logging
import pprint
import pytest
import json
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from tests.common import reboot, port_toggle
from tests.common.helpers.assertions import pytest_require
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_on_duts
from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.acl,
pytest.mark.disable_loganalyzer, # Disable automatic loganalyzer, since we use it for the test
pytest.mark.topology("any"),
pytest.mark.usefixtures('backup_and_restore_config_db_on_duts')
]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = "acl_test_dir" # Keep it under home dir so it persists through reboot
FILES_DIR = os.path.join(BASE_DIR, "files")
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
ACL_TABLE_TEMPLATE = "acltb_table.j2"
ACL_REMOVE_RULES_FILE = "acl_rules_del.json"
# TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow
ACL_RULES_FULL_TEMPLATE = {
"ipv4": "acltb_test_rules.j2",
"ipv6": "acltb_v6_test_rules.j2"
}
ACL_RULES_PART_TEMPLATES = {
"ipv4": tuple("acltb_test_rules_part_{}.j2".format(i) for i in xrange(1, 3)),
"ipv6": tuple("acltb_v6_test_rules_part_{}.j2".format(i) for i in xrange(1, 3))
}
DEFAULT_SRC_IP = {
"ipv4": "20.0.0.1",
"ipv6": "60c0:a800::5"
}
# TODO: These routes don't match the VLAN interface from the T0 topology.
# This needs to be addressed before we can enable the v6 tests for T0
DOWNSTREAM_DST_IP = {
"ipv4": "192.168.0.253",
"ipv6": "20c0:a800::2"
}
DOWNSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.0.252",
"ipv6": "20c0:a800::4"
}
DOWNSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.0.251",
"ipv6": "20c0:a800::8"
}
DOWNSTREAM_IP_PORT_MAP = {}
UPSTREAM_DST_IP = {
"ipv4": "192.168.128.1",
"ipv6": "40c0:a800::2"
}
UPSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.136.1",
"ipv6": "40c0:a800::4"
}
UPSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.144.1",
"ipv6": "40c0:a800::8"
}
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*"
LOG_EXPECT_ACL_TABLE_REMOVE_RE = ".*Successfully deleted ACL table.*"
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
PACKETS_COUNT = "packets_count"
BYTES_COUNT = "bytes_count"
@pytest.fixture(scope="module")
def setup(duthosts, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter):
"""Gather all required test information from DUT and tbinfo.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a randomly chosen DUT to run the test.
tbinfo: A fixture to gather information about the testbed.
Yields:
A Dictionary with required test information.
"""
mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["type"]
vlan_ports = []
vlan_mac = None
if topo == "t0":
vlan_ports = [mg_facts["minigraph_ptf_indices"][ifname]
for ifname in mg_facts["minigraph_vlans"].values()[0]["members"]]
config_facts = rand_selected_dut.get_running_config_facts()
vlan_table = config_facts["VLAN"]
vlan_name = list(vlan_table.keys())[0]
if "mac" in vlan_table[vlan_name]:
vlan_mac = vlan_table[vlan_name]["mac"]
# Get the list of upstream/downstream ports
downstream_ports = defaultdict(list)
upstream_ports = defaultdict(list)
downstream_port_ids = []
upstream_port_ids = []
upstream_port_id_to_router_mac_map = {}
downstream_port_id_to_router_mac_map = {}
# For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports
# For T1 testbeds, no VLANs are present so using the router MAC is acceptable
downlink_dst_mac = vlan_mac if vlan_mac is not None else rand_selected_dut.facts["router_mac"]
for interface, neighbor in mg_facts["minigraph_neighbors"].items():
port_id = mg_facts["minigraph_ptf_indices"][interface]
if (topo == "t1" and "T0" in neighbor["name"]) or (topo == "t0" and "Server" in neighbor["name"]):
downstream_ports[neighbor['namespace']].append(interface)
downstream_port_ids.append(port_id)
downstream_port_id_to_router_mac_map[port_id] = downlink_dst_mac
elif (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
upstream_ports[neighbor['namespace']].append(interface)
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_selected_dut.facts["router_mac"]
# stop garp service for single tor
if 'dualtor' not in tbinfo['topo']['name']:
logging.info("Stopping GARP service on single tor")
ptfhost.shell("supervisorctl stop garp_service", module_ignore_errors=True)
# If running on a dual ToR testbed, any uplink for either ToR is an acceptable
# source or destination port
if 'dualtor' in tbinfo['topo']['name'] and rand_unselected_dut is not None:
peer_mg_facts = rand_unselected_dut.get_extended_minigraph_facts(tbinfo)
for interface, neighbor in peer_mg_facts['minigraph_neighbors'].items():
if (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
port_id = peer_mg_facts["minigraph_ptf_indices"][interface]
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_unselected_dut.facts["router_mac"]
# Get the list of LAGs
port_channels = mg_facts["minigraph_portchannels"]
# TODO: We should make this more robust (i.e. bind all active front-panel ports)
acl_table_ports = defaultdict(list)
if topo == "t0" or tbinfo["topo"]["name"] in ("t1", "t1-lag"):
for namespace, port in downstream_ports.iteritems():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
if topo == "t0" or tbinfo["topo"]["name"] in ("t1-lag", "t1-64-lag", "t1-64-lag-clet"):
for k, v in port_channels.iteritems():
acl_table_ports[v['namespace']].append(k)
# In multi-asic we need config both in host and namespace.
if v['namespace']:
acl_table_ports[''].append(k)
else:
for namespace, port in upstream_ports.iteritems():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
dest_mac_mapping = {
"downlink->uplink": downstream_port_id_to_router_mac_map,
"uplink->downlink": upstream_port_id_to_router_mac_map
}
setup_information = {
"destination_mac": dest_mac_mapping,
"downstream_port_ids": downstream_port_ids,
"upstream_port_ids": upstream_port_ids,
"acl_table_ports": acl_table_ports,
"vlan_ports": vlan_ports,
"topo": topo,
"vlan_mac": vlan_mac
}
logger.info("Gathered variables for ACL test:\n{}".format(pprint.pformat(setup_information)))
logger.info("Creating temporary folder \"{}\" for ACL test".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
yield setup_information
logger.info("Removing temporary directory \"{}\"".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("rm -rf {}".format(DUT_TMP_DIR))
@pytest.fixture(scope="module", params=["ipv4", "ipv6"])
def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
if tbinfo["topo"]["type"] == "t0" and request.param == "ipv6":
pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")
return request.param
@pytest.fixture(scope="module")
def populate_vlan_arp_entries(setup, ptfhost, duthosts, rand_one_dut_hostname, ip_version):
"""Set up the ARP responder utility in the PTF container."""
duthost = duthosts[rand_one_dut_hostname]
if setup["topo"] != "t0":
def noop():
pass
yield noop
return # Don't fall through to t0 case
addr_list = [DOWNSTREAM_DST_IP[ip_version], DOWNSTREAM_IP_TO_ALLOW[ip_version], DOWNSTREAM_IP_TO_BLOCK[ip_version]]
vlan_host_map = defaultdict(dict)
for i in range(len(addr_list)):
mac = VLAN_BASE_MAC_PATTERN.format(i)
port = random.choice(setup["vlan_ports"])
addr = addr_list[i]
vlan_host_map[port][str(addr)] = mac
DOWNSTREAM_IP_PORT_MAP[addr] = port
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as ar_config:
json.dump(arp_responder_conf, ar_config)
ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread && supervisorctl update")
ptfhost.shell("supervisorctl restart arp_responder")
def populate_arp_table():
for dut in duthosts:
dut.command("sonic-clear fdb all")
dut.command("sonic-clear arp")
# Wait some time to ensure the async call of clear is completed
time.sleep(20)
for addr in addr_list:
dut.command("ping {} -c 3".format(addr), module_ignore_errors=True)
populate_arp_table()
yield populate_arp_table
logging.info("Stopping ARP responder")
ptfhost.shell("supervisorctl stop arp_responder")
duthost.command("sonic-clear fdb all")
duthost.command("sonic-clear arp")
# MASKED: stage function (lines 286-305)
def create_or_remove_acl_table(duthost, acl_table_config, setup, op):
for sonic_host_or_asic_inst in duthost.get_sonic_host_and_frontend_asic_instance():
namespace = sonic_host_or_asic_inst.namespace if hasattr(sonic_host_or_asic_inst, 'namespace') else ''
if op == "add":
logger.info("Creating ACL table: \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command(
"config acl add table {} {} -s {} -p {}".format(
acl_table_config["table_name"],
acl_table_config["table_type"],
acl_table_config["table_stage"],
",".join(setup["acl_table_ports"][namespace]),
)
)
else:
logger.info("Removing ACL table \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"]))
@pytest.fixture(scope="module")
def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
"""Apply ACL table configuration and remove after tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a randomly chosen DUT to run the test.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
"""
table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
acl_table_config = {
"table_name": table_name,
"table_ports": ",".join(setup["acl_table_ports"]['']),
"table_stage": stage,
"table_type": "L3" if ip_version == "ipv4" else "L3V6"
}
logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "add")
except LogAnalyzerError as err:
# Cleanup Config DB if table creation failed
logger.error("ACL table creation failed, attempting to clean-up...")
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
raise err
try:
yield acl_table_config
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
class BaseAclTest(object):
"""Base class for testing ACL rules.
Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.
They can optionally override `teardown_rules`, which will otherwise remove the rules by
applying an empty configuration file.
"""
__metaclass__ = ABCMeta
ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10
@abstractmethod
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
pass
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Perform actions after rules have been applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
pass
def teardown_rules(self, dut):
"""Tear down ACL rules once the tests have completed.
Args:
dut: The DUT having ACLs applied.
"""
logger.info("Finished with tests, removing all ACL rules...")
# Copy empty rules configuration
dut.copy(src=os.path.join(FILES_DIR, ACL_REMOVE_RULES_FILE), dest=DUT_TMP_DIR)
remove_rules_dut_path = os.path.join(DUT_TMP_DIR, ACL_REMOVE_RULES_FILE)
# Remove the rules
logger.info("Applying \"{}\"".format(remove_rules_dut_path))
dut.command("config acl update full {}".format(remove_rules_dut_path))
@pytest.fixture(scope="class", autouse=True)
def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, ip_version):
"""Setup/teardown ACL rules for the current set of tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a randomly chosen DUT to run the test.
localhost: The host from which tests are run.
setup: Parameters for the ACL tests.
acl_table: Configuration info for the ACL table.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl_rules")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
with loganalyzer:
self.setup_rules(duthost, acl_table, ip_version)
self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)
assert self.check_rule_counters(duthost), "Rule counters should be ready!"
except LogAnalyzerError as err:
# Cleanup Config DB if rule creation failed
logger.error("ACL rule application failed, attempting to clean-up...")
self.teardown_rules(duthost)
raise err
try:
yield
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
with loganalyzer:
logger.info("Removing ACL rules")
self.teardown_rules(duthost)
@pytest.yield_fixture(scope="class", autouse=True)
def counters_sanity_check(self, duthosts, acl_rules, acl_table):
"""Validate that the counters for each rule in the rules list increased as expected.
This fixture yields a list of rule IDs. The test case should add on to this list if
it is required to check the rule for increased counters.
After the test cases pass, the fixture will wait for the ACL counters to update and then
check if the counters for each rule in the list were increased.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a randomly chosen DUT to run the test.
acl_rules: Fixture that sets up the ACL rules.
acl_table: Fixture that sets up the ACL table.
"""
acl_facts = defaultdict(dict)
table_name = acl_table["table_name"]
for duthost in duthosts:
acl_facts[duthost]['before']= duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
rule_list = []
yield rule_list
if not rule_list:
return
# Wait for orchagent to update the ACL counters
time.sleep(self.ACL_COUNTERS_UPDATE_INTERVAL_SECS)
for duthost in duthosts:
acl_facts[duthost]['after']= duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
for duthost in duthosts:
assert len(acl_facts[duthost]['before']) == len(acl_facts[duthost]['after'])
for rule in rule_list:
rule = "RULE_{}".format(rule)
counters_before = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_before[PACKETS_COUNT] += acl_facts[duthost]['before'][rule][PACKETS_COUNT]
counters_before[BYTES_COUNT] += acl_facts[duthost]['before'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" before traffic:\n{}"
.format(rule, pprint.pformat(counters_before)))
counters_after = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT]
counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" after traffic:\n{}"
.format(rule, pprint.pformat(counters_after)))
assert counters_after[PACKETS_COUNT] > counters_before[PACKETS_COUNT]
assert counters_after[BYTES_COUNT] > counters_before[BYTES_COUNT]
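# Illustrative sketch (hypothetical test, not part of this suite): a test that
# expects RULE_1 to be hit appends its ID so this fixture verifies the counters
# increased after the traffic run:
#
#     def test_example_rule_hit(self, setup, direction, ptfadapter,
#                               counters_sanity_check, ip_version):
#         pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
#         self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
#         counters_sanity_check.append(1)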
@pytest.fixture(params=["downlink->uplink", "uplink->downlink"])
def direction(self, request):
"""Parametrize test based on direction of traffic."""
return request.param
def check_rule_counters(self, duthost):
logger.info('Wait all rule counters are ready')
return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost)
def check_rule_counters_internal(self, duthost):
for asic_id in duthost.get_frontend_asic_ids():
res = duthost.asic_instance(asic_id).command('aclshow -a')
num_of_lines = len(res['stdout'].split('\n'))
if num_of_lines <= 2 or 'N/A' in res['stdout']:
return False
return True
@pytest.fixture(autouse=True)
def get_src_port(self, setup, direction):
"""Get a source port for the current test."""
src_ports = setup["downstream_port_ids"] if direction == "downlink->uplink" else setup["upstream_port_ids"]
src_port = random.choice(src_ports)
logger.info("Selected source port {}".format(src_port))
self.src_port = src_port
def get_dst_ports(self, setup, direction):
"""Get the set of possible destination ports for the current test."""
return setup["upstream_port_ids"] if direction == "downlink->uplink" else setup["downstream_port_ids"]
def get_dst_ip(self, direction, ip_version):
"""Get the default destination IP for the current test."""
return UPSTREAM_DST_IP[ip_version] if direction == "downlink->uplink" else DOWNSTREAM_DST_IP[ip_version]
def tcp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):
"""Generate a TCP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
pkt = testutils.simple_tcp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64
)
if proto:
pkt["IP"].proto = proto
else:
pkt = testutils.simple_tcpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64
)
if proto:
pkt["IPv6"].nh = proto
if flags:
pkt["TCP"].flags = flags
return pkt
def udp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, sport=1234, dport=80):
"""Generate a UDP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_udp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ip_ttl=64
)
else:
return testutils.simple_udpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ipv6_hlim=64
)
def icmp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, icmp_type=8, icmp_code=0):
"""Generate an ICMP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_icmp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ip_ttl=64,
)
else:
return testutils.simple_icmpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ipv6_hlim=64,
)
def expected_mask_routed_packet(self, pkt, ip_version):
"""Generate the expected mask for a routed packet."""
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
exp_pkt.set_do_not_care_scapy(packet.Ether, "dst")
exp_pkt.set_do_not_care_scapy(packet.Ether, "src")
if ip_version == "ipv4":
exp_pkt.set_do_not_care_scapy(packet.IP, "chksum")
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
else:
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
return exp_pkt
def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that unmatched packets are dropped for ingress."""
if stage == "egress":
pytest.skip("Only run for ingress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that default egress rule allow all traffics"""
if stage == "ingress":
pytest.skip("Only run for egress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on source IP."""
src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(1)
def test_rules_priority_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the forwarding case."""
src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(20)
def test_rules_priority_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the drop case."""
src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(7)
def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(2 if direction == "uplink->downlink" else 3)
def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(15 if direction == "uplink->downlink" else 16)
def test_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on source IP."""
src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(14)
def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a UDP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(13)
def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a UDP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(26)
def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop an ICMP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(25)
def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward an ICMP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12)
def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4)
def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11)
def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10)
def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17)
def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6)
def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10)
def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18)
def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5)
def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29)
def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version):
exp_pkt = self.expected_mask_routed_packet(pkt, ip_version)
if ip_version == "ipv4":
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IP].dst)
else:
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst)
ptfadapter.dataplane.flush()
testutils.send(ptfadapter, self.src_port, pkt)
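        # If the destination IP was pinned to a specific VLAN member port by the ARP
        # responder fixture (via DOWNSTREAM_IP_PORT_MAP), verify on that exact port;
        # otherwise fall back to checking the whole set of candidate destination ports.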
if direction == "uplink->downlink" and downstream_dst_port:
if dropped:
testutils.verify_no_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
testutils.verify_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
if dropped:
testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction))
else:
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction),
timeout=20)
class TestBasicAcl(BaseAclTest):
"""Test Basic functionality of ACL rules (i.e. setup with full update on a running device)."""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating basic ACL rules config for ACL table \"{}\" on {}".format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]),
dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update full {}".format(dut_conf_file_path))
class TestIncrementalAcl(BaseAclTest):
"""Test ACL rule functionality with an incremental configuration.
Verify that everything still works as expected when an ACL configuration is applied in
multiple parts.
"""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating incremental ACL rules config for ACL table \"{}\""
.format(table_name))
for part, config_file in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}_part_{}.json".format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update incremental {}".format(dut_conf_file_path))
@pytest.mark.reboot
class TestAclWithReboot(TestBasicAcl):
"""Test ACL rule functionality with a reboot.
Verify that configuration persists correctly after reboot and is applied properly
upon startup.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
dut.command("config save -y")
reboot(dut, localhost, wait=240)
# We need some additional delay on e1031
if dut.facts["platform"] == "x86_64-cel_e1031-r0":
time.sleep(240)
populate_vlan_arp_entries()
@pytest.mark.port_toggle
class TestAclWithPortToggle(TestBasicAcl):
"""Test ACL rule functionality after toggling ports.
Verify that ACLs still function as expected after links flap.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
port_toggle(dut, tbinfo)
populate_vlan_arp_entries()
|
@pytest.fixture(scope="module", params=["ingress", "egress"])
def stage(request, duthosts, rand_one_dut_hostname):
"""Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
        duthosts: All DUTs belonging to the testbed.
        rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
Returns:
str: The ACL stage to be tested.
"""
duthost = duthosts[rand_one_dut_hostname]
pytest_require(
request.param == "ingress" or duthost.facts["asic_type"] not in ("broadcom"),
"Egress ACLs are not currently supported on \"{}\" ASICs".format(duthost.facts["asic_type"])
)
return request.param
| 286 | 305 |
import os
import time
import random
import logging
import pprint
import pytest
import json
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from tests.common import reboot, port_toggle
from tests.common.helpers.assertions import pytest_require
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_on_duts
from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.acl,
pytest.mark.disable_loganalyzer, # Disable automatic loganalyzer, since we use it for the test
pytest.mark.topology("any"),
pytest.mark.usefixtures('backup_and_restore_config_db_on_duts')
]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = "acl_test_dir" # Keep it under home dir so it persists through reboot
FILES_DIR = os.path.join(BASE_DIR, "files")
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
ACL_TABLE_TEMPLATE = "acltb_table.j2"
ACL_REMOVE_RULES_FILE = "acl_rules_del.json"
# TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow
ACL_RULES_FULL_TEMPLATE = {
"ipv4": "acltb_test_rules.j2",
"ipv6": "acltb_v6_test_rules.j2"
}
ACL_RULES_PART_TEMPLATES = {
"ipv4": tuple("acltb_test_rules_part_{}.j2".format(i) for i in xrange(1, 3)),
"ipv6": tuple("acltb_v6_test_rules_part_{}.j2".format(i) for i in xrange(1, 3))
}
DEFAULT_SRC_IP = {
"ipv4": "20.0.0.1",
"ipv6": "60c0:a800::5"
}
# TODO: These routes don't match the VLAN interface from the T0 topology.
# This needs to be addressed before we can enable the v6 tests for T0
DOWNSTREAM_DST_IP = {
"ipv4": "192.168.0.253",
"ipv6": "20c0:a800::2"
}
DOWNSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.0.252",
"ipv6": "20c0:a800::4"
}
DOWNSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.0.251",
"ipv6": "20c0:a800::8"
}
DOWNSTREAM_IP_PORT_MAP = {}
UPSTREAM_DST_IP = {
"ipv4": "192.168.128.1",
"ipv6": "40c0:a800::2"
}
UPSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.136.1",
"ipv6": "40c0:a800::4"
}
UPSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.144.1",
"ipv6": "40c0:a800::8"
}
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
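# Used by populate_vlan_arp_entries() to derive one MAC string per simulated VLAN host,
# e.g. "720600010000", "720600010001", ... for hosts 0, 1, ...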
LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*"
LOG_EXPECT_ACL_TABLE_REMOVE_RE = ".*Successfully deleted ACL table.*"
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
PACKETS_COUNT = "packets_count"
BYTES_COUNT = "bytes_count"
@pytest.fixture(scope="module")
def setup(duthosts, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter):
"""Gather all required test information from DUT and tbinfo.
Args:
        duthosts: All DUTs belonging to the testbed.
        rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
tbinfo: A fixture to gather information about the testbed.
Yields:
A Dictionary with required test information.
"""
mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["type"]
vlan_ports = []
vlan_mac = None
if topo == "t0":
vlan_ports = [mg_facts["minigraph_ptf_indices"][ifname]
for ifname in mg_facts["minigraph_vlans"].values()[0]["members"]]
config_facts = rand_selected_dut.get_running_config_facts()
vlan_table = config_facts["VLAN"]
vlan_name = list(vlan_table.keys())[0]
if "mac" in vlan_table[vlan_name]:
vlan_mac = vlan_table[vlan_name]["mac"]
# Get the list of upstream/downstream ports
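    # On T1 topologies, T0 neighbors count as downstream and T2 neighbors as upstream;
    # on T0 topologies, servers are downstream and T1 neighbors are upstream.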
downstream_ports = defaultdict(list)
upstream_ports = defaultdict(list)
downstream_port_ids = []
upstream_port_ids = []
upstream_port_id_to_router_mac_map = {}
downstream_port_id_to_router_mac_map = {}
# For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports
# For T1 testbeds, no VLANs are present so using the router MAC is acceptable
downlink_dst_mac = vlan_mac if vlan_mac is not None else rand_selected_dut.facts["router_mac"]
for interface, neighbor in mg_facts["minigraph_neighbors"].items():
port_id = mg_facts["minigraph_ptf_indices"][interface]
if (topo == "t1" and "T0" in neighbor["name"]) or (topo == "t0" and "Server" in neighbor["name"]):
downstream_ports[neighbor['namespace']].append(interface)
downstream_port_ids.append(port_id)
downstream_port_id_to_router_mac_map[port_id] = downlink_dst_mac
elif (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
upstream_ports[neighbor['namespace']].append(interface)
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_selected_dut.facts["router_mac"]
# stop garp service for single tor
if 'dualtor' not in tbinfo['topo']['name']:
logging.info("Stopping GARP service on single tor")
ptfhost.shell("supervisorctl stop garp_service", module_ignore_errors=True)
# If running on a dual ToR testbed, any uplink for either ToR is an acceptable
# source or destination port
if 'dualtor' in tbinfo['topo']['name'] and rand_unselected_dut is not None:
peer_mg_facts = rand_unselected_dut.get_extended_minigraph_facts(tbinfo)
for interface, neighbor in peer_mg_facts['minigraph_neighbors'].items():
if (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
port_id = peer_mg_facts["minigraph_ptf_indices"][interface]
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_unselected_dut.facts["router_mac"]
# Get the list of LAGs
port_channels = mg_facts["minigraph_portchannels"]
# TODO: We should make this more robust (i.e. bind all active front-panel ports)
acl_table_ports = defaultdict(list)
if topo == "t0" or tbinfo["topo"]["name"] in ("t1", "t1-lag"):
for namespace, port in downstream_ports.iteritems():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
if topo == "t0" or tbinfo["topo"]["name"] in ("t1-lag", "t1-64-lag", "t1-64-lag-clet"):
for k, v in port_channels.iteritems():
acl_table_ports[v['namespace']].append(k)
# In multi-asic we need config both in host and namespace.
if v['namespace']:
acl_table_ports[''].append(k)
else:
for namespace, port in upstream_ports.iteritems():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
dest_mac_mapping = {
"downlink->uplink": downstream_port_id_to_router_mac_map,
"uplink->downlink": upstream_port_id_to_router_mac_map
}
setup_information = {
"destination_mac": dest_mac_mapping,
"downstream_port_ids": downstream_port_ids,
"upstream_port_ids": upstream_port_ids,
"acl_table_ports": acl_table_ports,
"vlan_ports": vlan_ports,
"topo": topo,
"vlan_mac": vlan_mac
}
logger.info("Gathered variables for ACL test:\n{}".format(pprint.pformat(setup_information)))
logger.info("Creating temporary folder \"{}\" for ACL test".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
yield setup_information
logger.info("Removing temporary directory \"{}\"".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("rm -rf {}".format(DUT_TMP_DIR))
@pytest.fixture(scope="module", params=["ipv4", "ipv6"])
def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
if tbinfo["topo"]["type"] == "t0" and request.param == "ipv6":
pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")
return request.param
@pytest.fixture(scope="module")
def populate_vlan_arp_entries(setup, ptfhost, duthosts, rand_one_dut_hostname, ip_version):
"""Set up the ARP responder utility in the PTF container."""
duthost = duthosts[rand_one_dut_hostname]
if setup["topo"] != "t0":
def noop():
pass
yield noop
return # Don't fall through to t0 case
addr_list = [DOWNSTREAM_DST_IP[ip_version], DOWNSTREAM_IP_TO_ALLOW[ip_version], DOWNSTREAM_IP_TO_BLOCK[ip_version]]
vlan_host_map = defaultdict(dict)
for i in range(len(addr_list)):
mac = VLAN_BASE_MAC_PATTERN.format(i)
port = random.choice(setup["vlan_ports"])
addr = addr_list[i]
vlan_host_map[port][str(addr)] = mac
DOWNSTREAM_IP_PORT_MAP[addr] = port
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as ar_config:
json.dump(arp_responder_conf, ar_config)
ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread && supervisorctl update")
ptfhost.shell("supervisorctl restart arp_responder")
def populate_arp_table():
for dut in duthosts:
dut.command("sonic-clear fdb all")
dut.command("sonic-clear arp")
# Wait some time to ensure the async call of clear is completed
time.sleep(20)
for addr in addr_list:
dut.command("ping {} -c 3".format(addr), module_ignore_errors=True)
populate_arp_table()
yield populate_arp_table
logging.info("Stopping ARP responder")
ptfhost.shell("supervisorctl stop arp_responder")
duthost.command("sonic-clear fdb all")
duthost.command("sonic-clear arp")
@pytest.fixture(scope="module", params=["ingress", "egress"])
def stage(request, duthosts, rand_one_dut_hostname):
"""Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
        duthosts: All DUTs belonging to the testbed.
        rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
Returns:
str: The ACL stage to be tested.
"""
duthost = duthosts[rand_one_dut_hostname]
pytest_require(
request.param == "ingress" or duthost.facts["asic_type"] not in ("broadcom"),
"Egress ACLs are not currently supported on \"{}\" ASICs".format(duthost.facts["asic_type"])
)
return request.param
def create_or_remove_acl_table(duthost, acl_table_config, setup, op):
for sonic_host_or_asic_inst in duthost.get_sonic_host_and_frontend_asic_instance():
namespace = sonic_host_or_asic_inst.namespace if hasattr(sonic_host_or_asic_inst, 'namespace') else ''
if op == "add":
logger.info("Creating ACL table: \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command(
"config acl add table {} {} -s {} -p {}".format(
acl_table_config["table_name"],
acl_table_config["table_type"],
acl_table_config["table_stage"],
",".join(setup["acl_table_ports"][namespace]),
)
)
else:
logger.info("Removing ACL table \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"]))
@pytest.fixture(scope="module")
def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
"""Apply ACL table configuration and remove after tests.
Args:
        duthosts: All DUTs belonging to the testbed.
        rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
"""
table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
acl_table_config = {
"table_name": table_name,
"table_ports": ",".join(setup["acl_table_ports"]['']),
"table_stage": stage,
"table_type": "L3" if ip_version == "ipv4" else "L3V6"
}
logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "add")
except LogAnalyzerError as err:
# Cleanup Config DB if table creation failed
logger.error("ACL table creation failed, attempting to clean-up...")
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
raise err
try:
yield acl_table_config
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
class BaseAclTest(object):
"""Base class for testing ACL rules.
Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.
They can optionally override `teardown_rules`, which will otherwise remove the rules by
applying an empty configuration file.
"""
__metaclass__ = ABCMeta
ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10
@abstractmethod
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
pass
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Perform actions after rules have been applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
pass
def teardown_rules(self, dut):
"""Tear down ACL rules once the tests have completed.
Args:
dut: The DUT having ACLs applied.
"""
logger.info("Finished with tests, removing all ACL rules...")
# Copy empty rules configuration
dut.copy(src=os.path.join(FILES_DIR, ACL_REMOVE_RULES_FILE), dest=DUT_TMP_DIR)
remove_rules_dut_path = os.path.join(DUT_TMP_DIR, ACL_REMOVE_RULES_FILE)
# Remove the rules
logger.info("Applying \"{}\"".format(remove_rules_dut_path))
dut.command("config acl update full {}".format(remove_rules_dut_path))
@pytest.fixture(scope="class", autouse=True)
def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, ip_version):
"""Setup/teardown ACL rules for the current set of tests.
Args:
            duthosts: All DUTs belonging to the testbed.
            rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
localhost: The host from which tests are run.
setup: Parameters for the ACL tests.
acl_table: Configuration info for the ACL table.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl_rules")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
with loganalyzer:
self.setup_rules(duthost, acl_table, ip_version)
self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)
assert self.check_rule_counters(duthost), "Rule counters should be ready!"
except LogAnalyzerError as err:
# Cleanup Config DB if rule creation failed
logger.error("ACL rule application failed, attempting to clean-up...")
self.teardown_rules(duthost)
raise err
try:
yield
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
with loganalyzer:
logger.info("Removing ACL rules")
self.teardown_rules(duthost)
@pytest.yield_fixture(scope="class", autouse=True)
def counters_sanity_check(self, duthosts, acl_rules, acl_table):
"""Validate that the counters for each rule in the rules list increased as expected.
This fixture yields a list of rule IDs. The test case should add on to this list if
it is required to check the rule for increased counters.
After the test cases pass, the fixture will wait for the ACL counters to update and then
check if the counters for each rule in the list were increased.
Args:
            duthosts: All DUTs belonging to the testbed.
            rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
acl_rules: Fixture that sets up the ACL rules.
acl_table: Fixture that sets up the ACL table.
"""
acl_facts = defaultdict(dict)
table_name = acl_table["table_name"]
for duthost in duthosts:
            acl_facts[duthost]['before'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
rule_list = []
yield rule_list
if not rule_list:
return
# Wait for orchagent to update the ACL counters
time.sleep(self.ACL_COUNTERS_UPDATE_INTERVAL_SECS)
for duthost in duthosts:
            acl_facts[duthost]['after'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
for duthost in duthosts:
assert len(acl_facts[duthost]['before']) == len(acl_facts[duthost]['after'])
for rule in rule_list:
rule = "RULE_{}".format(rule)
counters_before = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_before[PACKETS_COUNT] += acl_facts[duthost]['before'][rule][PACKETS_COUNT]
counters_before[BYTES_COUNT] += acl_facts[duthost]['before'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" before traffic:\n{}"
.format(rule, pprint.pformat(counters_before)))
counters_after = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT]
counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" after traffic:\n{}"
.format(rule, pprint.pformat(counters_after)))
assert counters_after[PACKETS_COUNT] > counters_before[PACKETS_COUNT]
assert counters_after[BYTES_COUNT] > counters_before[BYTES_COUNT]
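    # Illustrative sketch (not part of the original suite): a traffic test opts into the
    # counter check above by appending the ID of the rule it expects to hit, e.g.:
    #
    #     def test_example_counter_check(self, setup, direction, ptfadapter,
    #                                    counters_sanity_check, ip_version):
    #         pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip="20.0.0.2")
    #         self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
    #         counters_sanity_check.append(1)  # rule 1 matches this source IP in the IPv4 case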
@pytest.fixture(params=["downlink->uplink", "uplink->downlink"])
def direction(self, request):
"""Parametrize test based on direction of traffic."""
return request.param
def check_rule_counters(self, duthost):
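        # Poll check_rule_counters_internal via wait_until until it reports that every
        # rule shows real counter values: up to 60 seconds, checking every 2 seconds.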
logger.info('Wait all rule counters are ready')
return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost)
def check_rule_counters_internal(self, duthost):
for asic_id in duthost.get_frontend_asic_ids():
res = duthost.asic_instance(asic_id).command('aclshow -a')
num_of_lines = len(res['stdout'].split('\n'))
if num_of_lines <= 2 or 'N/A' in res['stdout']:
return False
return True
@pytest.fixture(autouse=True)
def get_src_port(self, setup, direction):
"""Get a source port for the current test."""
src_ports = setup["downstream_port_ids"] if direction == "downlink->uplink" else setup["upstream_port_ids"]
src_port = random.choice(src_ports)
logger.info("Selected source port {}".format(src_port))
self.src_port = src_port
def get_dst_ports(self, setup, direction):
"""Get the set of possible destination ports for the current test."""
return setup["upstream_port_ids"] if direction == "downlink->uplink" else setup["downstream_port_ids"]
def get_dst_ip(self, direction, ip_version):
"""Get the default destination IP for the current test."""
return UPSTREAM_DST_IP[ip_version] if direction == "downlink->uplink" else DOWNSTREAM_DST_IP[ip_version]
def tcp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):
"""Generate a TCP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
pkt = testutils.simple_tcp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64
)
if proto:
pkt["IP"].proto = proto
else:
pkt = testutils.simple_tcpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64
)
if proto:
pkt["IPv6"].nh = proto
if flags:
pkt["TCP"].flags = flags
return pkt
def udp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, sport=1234, dport=80):
"""Generate a UDP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_udp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ip_ttl=64
)
else:
return testutils.simple_udpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ipv6_hlim=64
)
def icmp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, icmp_type=8, icmp_code=0):
"""Generate an ICMP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_icmp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ip_ttl=64,
)
else:
return testutils.simple_icmpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ipv6_hlim=64,
)
def expected_mask_routed_packet(self, pkt, ip_version):
"""Generate the expected mask for a routed packet."""
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
exp_pkt.set_do_not_care_scapy(packet.Ether, "dst")
exp_pkt.set_do_not_care_scapy(packet.Ether, "src")
if ip_version == "ipv4":
exp_pkt.set_do_not_care_scapy(packet.IP, "chksum")
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
else:
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
return exp_pkt
def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that unmatched packets are dropped for ingress."""
if stage == "egress":
pytest.skip("Only run for ingress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that default egress rule allow all traffics"""
if stage == "ingress":
pytest.skip("Only run for egress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on source IP."""
src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(1)
def test_rules_priority_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the forwarding case."""
src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(20)
def test_rules_priority_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the drop case."""
src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(7)
def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(2 if direction == "uplink->downlink" else 3)
def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(15 if direction == "uplink->downlink" else 16)
def test_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on source IP."""
src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(14)
def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a UDP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(13)
def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a UDP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(26)
def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop an ICMP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(25)
def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward an ICMP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12)
def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4)
def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11)
def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10)
def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17)
def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6)
def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10)
def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18)
def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5)
def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29)
def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version):
exp_pkt = self.expected_mask_routed_packet(pkt, ip_version)
if ip_version == "ipv4":
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IP].dst)
else:
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst)
ptfadapter.dataplane.flush()
testutils.send(ptfadapter, self.src_port, pkt)
if direction == "uplink->downlink" and downstream_dst_port:
if dropped:
testutils.verify_no_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
testutils.verify_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
if dropped:
testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction))
else:
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction),
timeout=20)
class TestBasicAcl(BaseAclTest):
"""Test Basic functionality of ACL rules (i.e. setup with full update on a running device)."""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating basic ACL rules config for ACL table \"{}\" on {}".format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]),
dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update full {}".format(dut_conf_file_path))
class TestIncrementalAcl(BaseAclTest):
"""Test ACL rule functionality with an incremental configuration.
Verify that everything still works as expected when an ACL configuration is applied in
multiple parts.
"""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating incremental ACL rules config for ACL table \"{}\""
.format(table_name))
for part, config_file in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}_part_{}.json".format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update incremental {}".format(dut_conf_file_path))
@pytest.mark.reboot
class TestAclWithReboot(TestBasicAcl):
"""Test ACL rule functionality with a reboot.
Verify that configuration persists correctly after reboot and is applied properly
upon startup.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
dut.command("config save -y")
reboot(dut, localhost, wait=240)
# We need some additional delay on e1031
if dut.facts["platform"] == "x86_64-cel_e1031-r0":
time.sleep(240)
populate_vlan_arp_entries()
@pytest.mark.port_toggle
class TestAclWithPortToggle(TestBasicAcl):
"""Test ACL rule functionality after toggling ports.
Verify that ACLs still function as expected after links flap.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
port_toggle(dut, tbinfo)
populate_vlan_arp_entries()
|
acl_table
|
Apply ACL table configuration and remove after tests.
Args:
    duthosts: All DUTs belonging to the testbed.
    rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
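For reference, applying and removing the table reduces to the SONiC CLI calls issued by
create_or_remove_acl_table; an illustrative example for the ingress/IPv4 case (the port
list is a placeholder, the real one comes from setup["acl_table_ports"]):

    config acl add table DATA_INGRESS_IPV4_TEST L3 -s ingress -p PortChannel0001,PortChannel0002
    config acl remove table DATA_INGRESS_IPV4_TEST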
|
import os
import time
import random
import logging
import pprint
import pytest
import json
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from tests.common import reboot, port_toggle
from tests.common.helpers.assertions import pytest_require
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_on_duts
from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.acl,
pytest.mark.disable_loganalyzer, # Disable automatic loganalyzer, since we use it for the test
pytest.mark.topology("any"),
pytest.mark.usefixtures('backup_and_restore_config_db_on_duts')
]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = "acl_test_dir" # Keep it under home dir so it persists through reboot
FILES_DIR = os.path.join(BASE_DIR, "files")
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
ACL_TABLE_TEMPLATE = "acltb_table.j2"
ACL_REMOVE_RULES_FILE = "acl_rules_del.json"
# TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow
ACL_RULES_FULL_TEMPLATE = {
"ipv4": "acltb_test_rules.j2",
"ipv6": "acltb_v6_test_rules.j2"
}
ACL_RULES_PART_TEMPLATES = {
"ipv4": tuple("acltb_test_rules_part_{}.j2".format(i) for i in xrange(1, 3)),
"ipv6": tuple("acltb_v6_test_rules_part_{}.j2".format(i) for i in xrange(1, 3))
}
DEFAULT_SRC_IP = {
"ipv4": "20.0.0.1",
"ipv6": "60c0:a800::5"
}
# TODO: These routes don't match the VLAN interface from the T0 topology.
# This needs to be addressed before we can enable the v6 tests for T0
DOWNSTREAM_DST_IP = {
"ipv4": "192.168.0.253",
"ipv6": "20c0:a800::2"
}
DOWNSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.0.252",
"ipv6": "20c0:a800::4"
}
DOWNSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.0.251",
"ipv6": "20c0:a800::8"
}
DOWNSTREAM_IP_PORT_MAP = {}
UPSTREAM_DST_IP = {
"ipv4": "192.168.128.1",
"ipv6": "40c0:a800::2"
}
UPSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.136.1",
"ipv6": "40c0:a800::4"
}
UPSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.144.1",
"ipv6": "40c0:a800::8"
}
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*"
LOG_EXPECT_ACL_TABLE_REMOVE_RE = ".*Successfully deleted ACL table.*"
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
PACKETS_COUNT = "packets_count"
BYTES_COUNT = "bytes_count"
@pytest.fixture(scope="module")
def setup(duthosts, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter):
"""Gather all required test information from DUT and tbinfo.
Args:
        duthosts: All DUTs belonging to the testbed.
        rand_one_dut_hostname: Hostname of a randomly chosen DUT to run the test on.
tbinfo: A fixture to gather information about the testbed.
Yields:
A Dictionary with required test information.
"""
mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["type"]
vlan_ports = []
vlan_mac = None
if topo == "t0":
vlan_ports = [mg_facts["minigraph_ptf_indices"][ifname]
for ifname in mg_facts["minigraph_vlans"].values()[0]["members"]]
config_facts = rand_selected_dut.get_running_config_facts()
vlan_table = config_facts["VLAN"]
vlan_name = list(vlan_table.keys())[0]
if "mac" in vlan_table[vlan_name]:
vlan_mac = vlan_table[vlan_name]["mac"]
# Get the list of upstream/downstream ports
downstream_ports = defaultdict(list)
upstream_ports = defaultdict(list)
downstream_port_ids = []
upstream_port_ids = []
upstream_port_id_to_router_mac_map = {}
downstream_port_id_to_router_mac_map = {}
# For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports
# For T1 testbeds, no VLANs are present so using the router MAC is acceptable
downlink_dst_mac = vlan_mac if vlan_mac is not None else rand_selected_dut.facts["router_mac"]
for interface, neighbor in mg_facts["minigraph_neighbors"].items():
port_id = mg_facts["minigraph_ptf_indices"][interface]
if (topo == "t1" and "T0" in neighbor["name"]) or (topo == "t0" and "Server" in neighbor["name"]):
downstream_ports[neighbor['namespace']].append(interface)
downstream_port_ids.append(port_id)
downstream_port_id_to_router_mac_map[port_id] = downlink_dst_mac
elif (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
upstream_ports[neighbor['namespace']].append(interface)
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_selected_dut.facts["router_mac"]
# stop garp service for single tor
if 'dualtor' not in tbinfo['topo']['name']:
logging.info("Stopping GARP service on single tor")
ptfhost.shell("supervisorctl stop garp_service", module_ignore_errors=True)
# If running on a dual ToR testbed, any uplink for either ToR is an acceptable
# source or destination port
if 'dualtor' in tbinfo['topo']['name'] and rand_unselected_dut is not None:
peer_mg_facts = rand_unselected_dut.get_extended_minigraph_facts(tbinfo)
for interface, neighbor in peer_mg_facts['minigraph_neighbors'].items():
if (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
port_id = peer_mg_facts["minigraph_ptf_indices"][interface]
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_unselected_dut.facts["router_mac"]
# Get the list of LAGs
port_channels = mg_facts["minigraph_portchannels"]
# TODO: We should make this more robust (i.e. bind all active front-panel ports)
acl_table_ports = defaultdict(list)
if topo == "t0" or tbinfo["topo"]["name"] in ("t1", "t1-lag"):
for namespace, port in downstream_ports.iteritems():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
if topo == "t0" or tbinfo["topo"]["name"] in ("t1-lag", "t1-64-lag", "t1-64-lag-clet"):
for k, v in port_channels.items():
acl_table_ports[v['namespace']].append(k)
# In multi-asic we need config both in host and namespace.
if v['namespace']:
acl_table_ports[''].append(k)
else:
for namespace, port in upstream_ports.items():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
dest_mac_mapping = {
"downlink->uplink": downstream_port_id_to_router_mac_map,
"uplink->downlink": upstream_port_id_to_router_mac_map
}
setup_information = {
"destination_mac": dest_mac_mapping,
"downstream_port_ids": downstream_port_ids,
"upstream_port_ids": upstream_port_ids,
"acl_table_ports": acl_table_ports,
"vlan_ports": vlan_ports,
"topo": topo,
"vlan_mac": vlan_mac
}
logger.info("Gathered variables for ACL test:\n{}".format(pprint.pformat(setup_information)))
logger.info("Creating temporary folder \"{}\" for ACL test".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
yield setup_information
logger.info("Removing temporary directory \"{}\"".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("rm -rf {}".format(DUT_TMP_DIR))
@pytest.fixture(scope="module", params=["ipv4", "ipv6"])
def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
if tbinfo["topo"]["type"] == "t0" and request.param == "ipv6":
pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")
return request.param
@pytest.fixture(scope="module")
def populate_vlan_arp_entries(setup, ptfhost, duthosts, rand_one_dut_hostname, ip_version):
"""Set up the ARP responder utility in the PTF container."""
duthost = duthosts[rand_one_dut_hostname]
if setup["topo"] != "t0":
def noop():
pass
yield noop
return # Don't fall through to t0 case
addr_list = [DOWNSTREAM_DST_IP[ip_version], DOWNSTREAM_IP_TO_ALLOW[ip_version], DOWNSTREAM_IP_TO_BLOCK[ip_version]]
vlan_host_map = defaultdict(dict)
for i in range(len(addr_list)):
mac = VLAN_BASE_MAC_PATTERN.format(i)
port = random.choice(setup["vlan_ports"])
addr = addr_list[i]
vlan_host_map[port][str(addr)] = mac
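# Remember which PTF port owns this downstream IP so traffic tests can verify against the exact port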
DOWNSTREAM_IP_PORT_MAP[addr] = port
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as ar_config:
json.dump(arp_responder_conf, ar_config)
ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread && supervisorctl update")
ptfhost.shell("supervisorctl restart arp_responder")
def populate_arp_table():
for dut in duthosts:
dut.command("sonic-clear fdb all")
dut.command("sonic-clear arp")
# Wait some time to ensure the async call of clear is completed
time.sleep(20)
for addr in addr_list:
dut.command("ping {} -c 3".format(addr), module_ignore_errors=True)
populate_arp_table()
yield populate_arp_table
logging.info("Stopping ARP responder")
ptfhost.shell("supervisorctl stop arp_responder")
duthost.command("sonic-clear fdb all")
duthost.command("sonic-clear arp")
@pytest.fixture(scope="module", params=["ingress", "egress"])
def stage(request, duthosts, rand_one_dut_hostname):
"""Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
Returns:
str: The ACL stage to be tested.
"""
duthost = duthosts[rand_one_dut_hostname]
pytest_require(
request.param == "ingress" or duthost.facts["asic_type"] not in ("broadcom"),
"Egress ACLs are not currently supported on \"{}\" ASICs".format(duthost.facts["asic_type"])
)
return request.param
def create_or_remove_acl_table(duthost, acl_table_config, setup, op):
for sonic_host_or_asic_inst in duthost.get_sonic_host_and_frontend_asic_instance():
namespace = sonic_host_or_asic_inst.namespace if hasattr(sonic_host_or_asic_inst, 'namespace') else ''
if op == "add":
logger.info("Creating ACL table: \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command(
"config acl add table {} {} -s {} -p {}".format(
acl_table_config["table_name"],
acl_table_config["table_type"],
acl_table_config["table_stage"],
",".join(setup["acl_table_ports"][namespace]),
)
)
else:
logger.info("Removing ACL table \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"]))
# MASKED: acl_table function (lines 324-372)
class BaseAclTest(object):
"""Base class for testing ACL rules.
Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.
They can optionally override `teardown_rules`, which will otherwise remove the rules by
applying an empty configuration file.
"""
__metaclass__ = ABCMeta
ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10
@abstractmethod
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
pass
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Perform actions after rules have been applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
pass
def teardown_rules(self, dut):
"""Tear down ACL rules once the tests have completed.
Args:
dut: The DUT having ACLs applied.
"""
logger.info("Finished with tests, removing all ACL rules...")
# Copy empty rules configuration
dut.copy(src=os.path.join(FILES_DIR, ACL_REMOVE_RULES_FILE), dest=DUT_TMP_DIR)
remove_rules_dut_path = os.path.join(DUT_TMP_DIR, ACL_REMOVE_RULES_FILE)
# Remove the rules
logger.info("Applying \"{}\"".format(remove_rules_dut_path))
dut.command("config acl update full {}".format(remove_rules_dut_path))
@pytest.fixture(scope="class", autouse=True)
def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, ip_version):
"""Setup/teardown ACL rules for the current set of tests.
Args:
duthosts: All DUTs belonging to the testbed.
localhost: The host from which tests are run.
setup: Parameters for the ACL tests.
acl_table: Configuration info for the ACL table.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl_rules")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
with loganalyzer:
self.setup_rules(duthost, acl_table, ip_version)
self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)
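# Rules are programmed asynchronously; make sure counters show up in 'aclshow -a' before running traffic tests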
assert self.check_rule_counters(duthost), "Rule counters should be ready!"
except LogAnalyzerError as err:
# Cleanup Config DB if rule creation failed
logger.error("ACL rule application failed, attempting to clean-up...")
self.teardown_rules(duthost)
raise err
try:
yield
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
with loganalyzer:
logger.info("Removing ACL rules")
self.teardown_rules(duthost)
@pytest.fixture(scope="class", autouse=True)
def counters_sanity_check(self, duthosts, acl_rules, acl_table):
"""Validate that the counters for each rule in the rules list increased as expected.
This fixture yields a list of rule IDs. The test case should add on to this list if
it is required to check the rule for increased counters.
After the test cases pass, the fixture will wait for the ACL counters to update and then
check if the counters for each rule in the list were increased.
Args:
duthosts: All DUTs belonging to the testbed.
acl_rules: Fixture that sets up the ACL rules.
acl_table: Fixture that sets up the ACL table.
"""
acl_facts = defaultdict(dict)
table_name = acl_table["table_name"]
for duthost in duthosts:
acl_facts[duthost]['before'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
rule_list = []
yield rule_list
if not rule_list:
return
# Wait for orchagent to update the ACL counters
time.sleep(self.ACL_COUNTERS_UPDATE_INTERVAL_SECS)
for duthost in duthosts:
acl_facts[duthost]['after'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
for duthost in duthosts:
assert len(acl_facts[duthost]['before']) == len(acl_facts[duthost]['after'])
for rule in rule_list:
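# Sum each rule's counters across all DUTs so the check also holds on multi-DUT/dualtor testbeds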
rule = "RULE_{}".format(rule)
counters_before = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_before[PACKETS_COUNT] += acl_facts[duthost]['before'][rule][PACKETS_COUNT]
counters_before[BYTES_COUNT] += acl_facts[duthost]['before'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" before traffic:\n{}"
.format(rule, pprint.pformat(counters_before)))
counters_after = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT]
counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" after traffic:\n{}"
.format(rule, pprint.pformat(counters_after)))
assert counters_after[PACKETS_COUNT] > counters_before[PACKETS_COUNT]
assert counters_after[BYTES_COUNT] > counters_before[BYTES_COUNT]
@pytest.fixture(params=["downlink->uplink", "uplink->downlink"])
def direction(self, request):
"""Parametrize test based on direction of traffic."""
return request.param
def check_rule_counters(self, duthost):
logger.info('Wait all rule counters are ready')
return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost)
def check_rule_counters_internal(self, duthost):
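# 'aclshow -a' prints a header plus one line per rule; too few lines or 'N/A' values mean counters are not ready yet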
for asic_id in duthost.get_frontend_asic_ids():
res = duthost.asic_instance(asic_id).command('aclshow -a')
num_of_lines = len(res['stdout'].split('\n'))
if num_of_lines <= 2 or 'N/A' in res['stdout']:
return False
return True
@pytest.fixture(autouse=True)
def get_src_port(self, setup, direction):
"""Get a source port for the current test."""
src_ports = setup["downstream_port_ids"] if direction == "downlink->uplink" else setup["upstream_port_ids"]
src_port = random.choice(src_ports)
logger.info("Selected source port {}".format(src_port))
self.src_port = src_port
def get_dst_ports(self, setup, direction):
"""Get the set of possible destination ports for the current test."""
return setup["upstream_port_ids"] if direction == "downlink->uplink" else setup["downstream_port_ids"]
def get_dst_ip(self, direction, ip_version):
"""Get the default destination IP for the current test."""
return UPSTREAM_DST_IP[ip_version] if direction == "downlink->uplink" else DOWNSTREAM_DST_IP[ip_version]
def tcp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):
"""Generate a TCP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
pkt = testutils.simple_tcp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64
)
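# Overwrite the IP protocol field after construction; used by the IP-protocol match tests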
if proto:
pkt["IP"].proto = proto
else:
pkt = testutils.simple_tcpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64
)
if proto:
pkt["IPv6"].nh = proto
if flags:
pkt["TCP"].flags = flags
return pkt
def udp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, sport=1234, dport=80):
"""Generate a UDP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_udp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ip_ttl=64
)
else:
return testutils.simple_udpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ipv6_hlim=64
)
def icmp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, icmp_type=8, icmp_code=0):
"""Generate an ICMP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_icmp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ip_ttl=64,
)
else:
return testutils.simple_icmpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ipv6_hlim=64,
)
def expected_mask_routed_packet(self, pkt, ip_version):
"""Generate the expected mask for a routed packet."""
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
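# L2 headers are rewritten by the router, so ignore Ethernet addresses in the expected packet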
exp_pkt.set_do_not_care_scapy(packet.Ether, "dst")
exp_pkt.set_do_not_care_scapy(packet.Ether, "src")
if ip_version == "ipv4":
exp_pkt.set_do_not_care_scapy(packet.IP, "chksum")
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
else:
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
return exp_pkt
def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that unmatched packets are dropped for ingress."""
if stage == "egress":
pytest.skip("Only run for ingress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that default egress rule allow all traffics"""
if stage == "ingress":
pytest.skip("Only run for egress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on source IP."""
src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(1)
def test_rules_priority_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the forwarding case."""
src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(20)
def test_rules_priority_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the drop case."""
src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(7)
def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(2 if direction == "uplink->downlink" else 3)
def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(15 if direction == "uplink->downlink" else 16)
def test_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on source IP."""
src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(14)
def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a UDP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(13)
def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a UDP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(26)
def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop an ICMP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(25)
def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward an ICMP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12)
def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4)
def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11)
def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10)
def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17)
def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6)
def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10)
def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18)
def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5)
def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29)
def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version):
exp_pkt = self.expected_mask_routed_packet(pkt, ip_version)
if ip_version == "ipv4":
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IP].dst)
else:
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst)
ptfadapter.dataplane.flush()
testutils.send(ptfadapter, self.src_port, pkt)
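# If the destination IP was assigned to a specific VLAN member (via the ARP responder), verify on that exact port; otherwise accept any port in the direction's set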
if direction == "uplink->downlink" and downstream_dst_port:
if dropped:
testutils.verify_no_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
testutils.verify_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
if dropped:
testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction))
else:
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction),
timeout=20)
class TestBasicAcl(BaseAclTest):
"""Test Basic functionality of ACL rules (i.e. setup with full update on a running device)."""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating basic ACL rules config for ACL table \"{}\" on {}".format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]),
dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update full {}".format(dut_conf_file_path))
class TestIncrementalAcl(BaseAclTest):
"""Test ACL rule functionality with an incremental configuration.
Verify that everything still works as expected when an ACL configuration is applied in
multiple parts.
"""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating incremental ACL rules config for ACL table \"{}\""
.format(table_name))
for part, config_file in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}_part_{}.json".format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update incremental {}".format(dut_conf_file_path))
@pytest.mark.reboot
class TestAclWithReboot(TestBasicAcl):
"""Test ACL rule functionality with a reboot.
Verify that configuration persists correctly after reboot and is applied properly
upon startup.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
dut.command("config save -y")
reboot(dut, localhost, wait=240)
# We need some additional delay on e1031
if dut.facts["platform"] == "x86_64-cel_e1031-r0":
time.sleep(240)
populate_vlan_arp_entries()
@pytest.mark.port_toggle
class TestAclWithPortToggle(TestBasicAcl):
"""Test ACL rule functionality after toggling ports.
Verify that ACLs still function as expected after links flap.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
port_toggle(dut, tbinfo)
populate_vlan_arp_entries()
|
@pytest.fixture(scope="module")
def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
"""Apply ACL table configuration and remove after tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
"""
table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
acl_table_config = {
"table_name": table_name,
"table_ports": ",".join(setup["acl_table_ports"]['']),
"table_stage": stage,
"table_type": "L3" if ip_version == "ipv4" else "L3V6"
}
logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "add")
except LogAnalyzerError as err:
# Cleanup Config DB if table creation failed
logger.error("ACL table creation failed, attempting to clean-up...")
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
raise err
try:
yield acl_table_config
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
| 324 | 372 |
from tests.common import reboot, port_toggle
import os
import time
import random
import logging
import pprint
import pytest
import json
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from tests.common.helpers.assertions import pytest_require
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_on_duts
from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.acl,
pytest.mark.disable_loganalyzer, # Disable automatic loganalyzer, since we use it for the test
pytest.mark.topology("any"),
pytest.mark.usefixtures('backup_and_restore_config_db_on_duts')
]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = "acl_test_dir" # Keep it under home dir so it persists through reboot
FILES_DIR = os.path.join(BASE_DIR, "files")
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
ACL_TABLE_TEMPLATE = "acltb_table.j2"
ACL_REMOVE_RULES_FILE = "acl_rules_del.json"
# TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow
ACL_RULES_FULL_TEMPLATE = {
"ipv4": "acltb_test_rules.j2",
"ipv6": "acltb_v6_test_rules.j2"
}
ACL_RULES_PART_TEMPLATES = {
"ipv4": tuple("acltb_test_rules_part_{}.j2".format(i) for i in xrange(1, 3)),
"ipv6": tuple("acltb_v6_test_rules_part_{}.j2".format(i) for i in xrange(1, 3))
}
DEFAULT_SRC_IP = {
"ipv4": "20.0.0.1",
"ipv6": "60c0:a800::5"
}
# TODO: These routes don't match the VLAN interface from the T0 topology.
# This needs to be addressed before we can enable the v6 tests for T0
DOWNSTREAM_DST_IP = {
"ipv4": "192.168.0.253",
"ipv6": "20c0:a800::2"
}
DOWNSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.0.252",
"ipv6": "20c0:a800::4"
}
DOWNSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.0.251",
"ipv6": "20c0:a800::8"
}
DOWNSTREAM_IP_PORT_MAP = {}
UPSTREAM_DST_IP = {
"ipv4": "192.168.128.1",
"ipv6": "40c0:a800::2"
}
UPSTREAM_IP_TO_ALLOW = {
"ipv4": "192.168.136.1",
"ipv6": "40c0:a800::4"
}
UPSTREAM_IP_TO_BLOCK = {
"ipv4": "192.168.144.1",
"ipv6": "40c0:a800::8"
}
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*"
LOG_EXPECT_ACL_TABLE_REMOVE_RE = ".*Successfully deleted ACL table.*"
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
PACKETS_COUNT = "packets_count"
BYTES_COUNT = "bytes_count"
@pytest.fixture(scope="module")
def setup(duthosts, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter):
"""Gather all required test information from DUT and tbinfo.
Args:
duthosts: All DUTs belonging to the testbed.
ptfhost: PTF host used to run traffic helpers.
rand_selected_dut: The randomly selected DUT under test.
rand_unselected_dut: The peer DUT on dualtor testbeds, otherwise unused.
tbinfo: A fixture to gather information about the testbed.
ptfadapter: PTF adapter fixture.
Yields:
A Dictionary with required test information.
"""
mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo)
topo = tbinfo["topo"]["type"]
vlan_ports = []
vlan_mac = None
if topo == "t0":
vlan_ports = [mg_facts["minigraph_ptf_indices"][ifname]
for ifname in list(mg_facts["minigraph_vlans"].values())[0]["members"]]
config_facts = rand_selected_dut.get_running_config_facts()
vlan_table = config_facts["VLAN"]
vlan_name = list(vlan_table.keys())[0]
if "mac" in vlan_table[vlan_name]:
vlan_mac = vlan_table[vlan_name]["mac"]
# Get the list of upstream/downstream ports
downstream_ports = defaultdict(list)
upstream_ports = defaultdict(list)
downstream_port_ids = []
upstream_port_ids = []
upstream_port_id_to_router_mac_map = {}
downstream_port_id_to_router_mac_map = {}
# For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports
# For T1 testbeds, no VLANs are present so using the router MAC is acceptable
downlink_dst_mac = vlan_mac if vlan_mac is not None else rand_selected_dut.facts["router_mac"]
for interface, neighbor in mg_facts["minigraph_neighbors"].items():
port_id = mg_facts["minigraph_ptf_indices"][interface]
if (topo == "t1" and "T0" in neighbor["name"]) or (topo == "t0" and "Server" in neighbor["name"]):
downstream_ports[neighbor['namespace']].append(interface)
downstream_port_ids.append(port_id)
downstream_port_id_to_router_mac_map[port_id] = downlink_dst_mac
elif (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
upstream_ports[neighbor['namespace']].append(interface)
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_selected_dut.facts["router_mac"]
# stop garp service for single tor
if 'dualtor' not in tbinfo['topo']['name']:
logging.info("Stopping GARP service on single tor")
ptfhost.shell("supervisorctl stop garp_service", module_ignore_errors=True)
# If running on a dual ToR testbed, any uplink for either ToR is an acceptable
# source or destination port
if 'dualtor' in tbinfo['topo']['name'] and rand_unselected_dut is not None:
peer_mg_facts = rand_unselected_dut.get_extended_minigraph_facts(tbinfo)
for interface, neighbor in peer_mg_facts['minigraph_neighbors'].items():
if (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
port_id = peer_mg_facts["minigraph_ptf_indices"][interface]
upstream_port_ids.append(port_id)
upstream_port_id_to_router_mac_map[port_id] = rand_unselected_dut.facts["router_mac"]
# Get the list of LAGs
port_channels = mg_facts["minigraph_portchannels"]
# TODO: We should make this more robust (i.e. bind all active front-panel ports)
acl_table_ports = defaultdict(list)
if topo == "t0" or tbinfo["topo"]["name"] in ("t1", "t1-lag"):
for namespace, port in downstream_ports.items():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
if topo == "t0" or tbinfo["topo"]["name"] in ("t1-lag", "t1-64-lag", "t1-64-lag-clet"):
for k, v in port_channels.items():
acl_table_ports[v['namespace']].append(k)
# In multi-asic we need config both in host and namespace.
if v['namespace']:
acl_table_ports[''].append(k)
else:
for namespace, port in upstream_ports.items():
acl_table_ports[namespace] += port
# In multi-asic we need config both in host and namespace.
if namespace:
acl_table_ports[''] += port
dest_mac_mapping = {
"downlink->uplink": downstream_port_id_to_router_mac_map,
"uplink->downlink": upstream_port_id_to_router_mac_map
}
setup_information = {
"destination_mac": dest_mac_mapping,
"downstream_port_ids": downstream_port_ids,
"upstream_port_ids": upstream_port_ids,
"acl_table_ports": acl_table_ports,
"vlan_ports": vlan_ports,
"topo": topo,
"vlan_mac": vlan_mac
}
logger.info("Gathered variables for ACL test:\n{}".format(pprint.pformat(setup_information)))
logger.info("Creating temporary folder \"{}\" for ACL test".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
yield setup_information
logger.info("Removing temporary directory \"{}\"".format(DUT_TMP_DIR))
for duthost in duthosts:
duthost.command("rm -rf {}".format(DUT_TMP_DIR))
@pytest.fixture(scope="module", params=["ipv4", "ipv6"])
def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
if tbinfo["topo"]["type"] == "t0" and request.param == "ipv6":
pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")
return request.param
@pytest.fixture(scope="module")
def populate_vlan_arp_entries(setup, ptfhost, duthosts, rand_one_dut_hostname, ip_version):
"""Set up the ARP responder utility in the PTF container."""
duthost = duthosts[rand_one_dut_hostname]
if setup["topo"] != "t0":
def noop():
pass
yield noop
return # Don't fall through to t0 case
addr_list = [DOWNSTREAM_DST_IP[ip_version], DOWNSTREAM_IP_TO_ALLOW[ip_version], DOWNSTREAM_IP_TO_BLOCK[ip_version]]
vlan_host_map = defaultdict(dict)
for i in range(len(addr_list)):
mac = VLAN_BASE_MAC_PATTERN.format(i)
port = random.choice(setup["vlan_ports"])
addr = addr_list[i]
vlan_host_map[port][str(addr)] = mac
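# Remember which PTF port owns this downstream IP so traffic tests can verify against the exact port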
DOWNSTREAM_IP_PORT_MAP[addr] = port
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as ar_config:
json.dump(arp_responder_conf, ar_config)
ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread && supervisorctl update")
ptfhost.shell("supervisorctl restart arp_responder")
def populate_arp_table():
for dut in duthosts:
dut.command("sonic-clear fdb all")
dut.command("sonic-clear arp")
# Wait some time to ensure the async call of clear is completed
time.sleep(20)
for addr in addr_list:
dut.command("ping {} -c 3".format(addr), module_ignore_errors=True)
populate_arp_table()
yield populate_arp_table
logging.info("Stopping ARP responder")
ptfhost.shell("supervisorctl stop arp_responder")
duthost.command("sonic-clear fdb all")
duthost.command("sonic-clear arp")
@pytest.fixture(scope="module", params=["ingress", "egress"])
def stage(request, duthosts, rand_one_dut_hostname):
"""Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
Returns:
str: The ACL stage to be tested.
"""
duthost = duthosts[rand_one_dut_hostname]
pytest_require(
request.param == "ingress" or duthost.facts["asic_type"] not in ("broadcom"),
"Egress ACLs are not currently supported on \"{}\" ASICs".format(duthost.facts["asic_type"])
)
return request.param
def create_or_remove_acl_table(duthost, acl_table_config, setup, op):
for sonic_host_or_asic_inst in duthost.get_sonic_host_and_frontend_asic_instance():
namespace = sonic_host_or_asic_inst.namespace if hasattr(sonic_host_or_asic_inst, 'namespace') else ''
if op == "add":
logger.info("Creating ACL table: \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command(
"config acl add table {} {} -s {} -p {}".format(
acl_table_config["table_name"],
acl_table_config["table_type"],
acl_table_config["table_stage"],
",".join(setup["acl_table_ports"][namespace]),
)
)
else:
logger.info("Removing ACL table \"{}\" in namespace {} on device {}".format(acl_table_config["table_name"], namespace, duthost))
sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"]))
@pytest.fixture(scope="module")
def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
"""Apply ACL table configuration and remove after tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
"""
table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
acl_table_config = {
"table_name": table_name,
"table_ports": ",".join(setup["acl_table_ports"]['']),
"table_stage": stage,
"table_type": "L3" if ip_version == "ipv4" else "L3V6"
}
logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "add")
except LogAnalyzerError as err:
# Cleanup Config DB if table creation failed
logger.error("ACL table creation failed, attempting to clean-up...")
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
raise err
try:
yield acl_table_config
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
with loganalyzer:
create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
class BaseAclTest(object):
"""Base class for testing ACL rules.
Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.
They can optionally override `teardown_rules`, which will otherwise remove the rules by
applying an empty configuration file.
"""
__metaclass__ = ABCMeta
ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10
@abstractmethod
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
pass
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Perform actions after rules have been applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
pass
def teardown_rules(self, dut):
"""Tear down ACL rules once the tests have completed.
Args:
dut: The DUT having ACLs applied.
"""
logger.info("Finished with tests, removing all ACL rules...")
# Copy empty rules configuration
dut.copy(src=os.path.join(FILES_DIR, ACL_REMOVE_RULES_FILE), dest=DUT_TMP_DIR)
remove_rules_dut_path = os.path.join(DUT_TMP_DIR, ACL_REMOVE_RULES_FILE)
# Remove the rules
logger.info("Applying \"{}\"".format(remove_rules_dut_path))
dut.command("config acl update full {}".format(remove_rules_dut_path))
@pytest.fixture(scope="class", autouse=True)
def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, ip_version):
"""Setup/teardown ACL rules for the current set of tests.
Args:
duthosts: All DUTs belonging to the testbed.
localhost: The host from which tests are run.
setup: Parameters for the ACL tests.
acl_table: Configuration info for the ACL table.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
"""
dut_to_analyzer_map = {}
for duthost in duthosts:
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl_rules")
loganalyzer.load_common_config()
dut_to_analyzer_map[duthost] = loganalyzer
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
with loganalyzer:
self.setup_rules(duthost, acl_table, ip_version)
self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)
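# Rules are programmed asynchronously; make sure counters show up in 'aclshow -a' before running traffic tests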
assert self.check_rule_counters(duthost), "Rule counters should be ready!"
except LogAnalyzerError as err:
# Cleanup Config DB if rule creation failed
logger.error("ACL rule application failed, attempting to clean-up...")
self.teardown_rules(duthost)
raise err
try:
yield
finally:
for duthost, loganalyzer in dut_to_analyzer_map.items():
loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
with loganalyzer:
logger.info("Removing ACL rules")
self.teardown_rules(duthost)
@pytest.fixture(scope="class", autouse=True)
def counters_sanity_check(self, duthosts, acl_rules, acl_table):
"""Validate that the counters for each rule in the rules list increased as expected.
This fixture yields a list of rule IDs. The test case should add on to this list if
it is required to check the rule for increased counters.
After the test cases pass, the fixture will wait for the ACL counters to update and then
check if the counters for each rule in the list were increased.
Args:
duthosts: All DUTs belonging to the testbed.
acl_rules: Fixture that sets up the ACL rules.
acl_table: Fixture that sets up the ACL table.
"""
acl_facts = defaultdict(dict)
table_name = acl_table["table_name"]
for duthost in duthosts:
acl_facts[duthost]['before'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
rule_list = []
yield rule_list
if not rule_list:
return
# Wait for orchagent to update the ACL counters
time.sleep(self.ACL_COUNTERS_UPDATE_INTERVAL_SECS)
for duthost in duthosts:
acl_facts[duthost]['after'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]
for duthost in duthosts:
assert len(acl_facts[duthost]['before']) == len(acl_facts[duthost]['after'])
for rule in rule_list:
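# Sum each rule's counters across all DUTs so the check also holds on multi-DUT/dualtor testbeds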
rule = "RULE_{}".format(rule)
counters_before = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_before[PACKETS_COUNT] += acl_facts[duthost]['before'][rule][PACKETS_COUNT]
counters_before[BYTES_COUNT] += acl_facts[duthost]['before'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" before traffic:\n{}"
.format(rule, pprint.pformat(counters_before)))
counters_after = {
PACKETS_COUNT: 0,
BYTES_COUNT: 0
}
for duthost in duthosts:
counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT]
counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT]
logger.info("Counters for ACL rule \"{}\" after traffic:\n{}"
.format(rule, pprint.pformat(counters_after)))
assert counters_after[PACKETS_COUNT] > counters_before[PACKETS_COUNT]
assert counters_after[BYTES_COUNT] > counters_before[BYTES_COUNT]
@pytest.fixture(params=["downlink->uplink", "uplink->downlink"])
def direction(self, request):
"""Parametrize test based on direction of traffic."""
return request.param
def check_rule_counters(self, duthost):
logger.info('Wait all rule counters are ready')
return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost)
def check_rule_counters_internal(self, duthost):
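# 'aclshow -a' prints a header plus one line per rule; too few lines or 'N/A' values mean counters are not ready yet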
for asic_id in duthost.get_frontend_asic_ids():
res = duthost.asic_instance(asic_id).command('aclshow -a')
num_of_lines = len(res['stdout'].split('\n'))
if num_of_lines <= 2 or 'N/A' in res['stdout']:
return False
return True
@pytest.fixture(autouse=True)
def get_src_port(self, setup, direction):
"""Get a source port for the current test."""
src_ports = setup["downstream_port_ids"] if direction == "downlink->uplink" else setup["upstream_port_ids"]
src_port = random.choice(src_ports)
logger.info("Selected source port {}".format(src_port))
self.src_port = src_port
def get_dst_ports(self, setup, direction):
"""Get the set of possible destination ports for the current test."""
return setup["upstream_port_ids"] if direction == "downlink->uplink" else setup["downstream_port_ids"]
def get_dst_ip(self, direction, ip_version):
"""Get the default destination IP for the current test."""
return UPSTREAM_DST_IP[ip_version] if direction == "downlink->uplink" else DOWNSTREAM_DST_IP[ip_version]
def tcp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):
"""Generate a TCP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
pkt = testutils.simple_tcp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64
)
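# Overwrite the IP protocol field after construction; used by the IP-protocol match tests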
if proto:
pkt["IP"].proto = proto
else:
pkt = testutils.simple_tcpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64
)
if proto:
pkt["IPv6"].nh = proto
if flags:
pkt["TCP"].flags = flags
return pkt
def udp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, sport=1234, dport=80):
"""Generate a UDP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_udp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ip_ttl=64
)
else:
return testutils.simple_udpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
udp_sport=sport,
udp_dport=dport,
ipv6_hlim=64
)
def icmp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, icmp_type=8, icmp_code=0):
"""Generate an ICMP packet for testing."""
src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
if ip_version == "ipv4":
return testutils.simple_icmp_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ip_dst=dst_ip,
ip_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ip_ttl=64,
)
else:
return testutils.simple_icmpv6_packet(
eth_dst=setup["destination_mac"][direction][self.src_port],
eth_src=ptfadapter.dataplane.get_mac(0, 0),
ipv6_dst=dst_ip,
ipv6_src=src_ip,
icmp_type=icmp_type,
icmp_code=icmp_code,
ipv6_hlim=64,
)
def expected_mask_routed_packet(self, pkt, ip_version):
"""Generate the expected mask for a routed packet."""
exp_pkt = pkt.copy()
exp_pkt = mask.Mask(exp_pkt)
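# L2 headers are rewritten by the router, so ignore Ethernet addresses in the expected packet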
exp_pkt.set_do_not_care_scapy(packet.Ether, "dst")
exp_pkt.set_do_not_care_scapy(packet.Ether, "src")
if ip_version == "ipv4":
exp_pkt.set_do_not_care_scapy(packet.IP, "chksum")
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
else:
# In multi-asic we cannot determine this so ignore.
exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')
return exp_pkt
def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that unmatched packets are dropped for ingress."""
if stage == "egress":
pytest.skip("Only run for ingress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage):
"""Verify that default egress rule allow all traffics"""
if stage == "ingress":
pytest.skip("Only run for egress")
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on source IP."""
src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(1)
def test_rules_priority_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the forwarding case."""
src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(20)
def test_rules_priority_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we respect rule priorites in the drop case."""
src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(7)
def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(2 if direction == "uplink->downlink" else 3)
def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on destination IP."""
dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version]
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(15 if direction == "uplink->downlink" else 16)
def test_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a packet on source IP."""
src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3"
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(14)
def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward a UDP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(13)
def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop a UDP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(26)
def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop an ICMP packet on source IP."""
src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(25)
def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward an ICMP packet on source IP."""
src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12)
def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4)
def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11)
def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10)
def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 destination ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on a range of L4 source ports."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17)
def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5)
def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and forward on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6)
def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 destination port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22)
def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on L4 source port."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10)
def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the IP protocol."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18)
def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5)
def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
"""Verify that we can match and drop on the TCP flags."""
src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10"
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29)
def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version):
exp_pkt = self.expected_mask_routed_packet(pkt, ip_version)
if ip_version == "ipv4":
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IP].dst)
else:
downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst)
ptfadapter.dataplane.flush()
testutils.send(ptfadapter, self.src_port, pkt)
if direction == "uplink->downlink" and downstream_dst_port:
if dropped:
testutils.verify_no_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
testutils.verify_packet(ptfadapter, exp_pkt, downstream_dst_port)
else:
if dropped:
testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction))
else:
testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction),
timeout=20)
class TestBasicAcl(BaseAclTest):
"""Test Basic functionality of ACL rules (i.e. setup with full update on a running device)."""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating basic ACL rules config for ACL table \"{}\" on {}".format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]),
dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update full {}".format(dut_conf_file_path))
class TestIncrementalAcl(BaseAclTest):
"""Test ACL rule functionality with an incremental configuration.
Verify that everything still works as expected when an ACL configuration is applied in
multiple parts.
"""
def setup_rules(self, dut, acl_table, ip_version):
"""Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
"""
table_name = acl_table["table_name"]
dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating incremental ACL rules config for ACL table \"{}\""
.format(table_name))
for part, config_file in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}_part_{}.json".format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info("Applying ACL rules config \"{}\"".format(dut_conf_file_path))
dut.command("config acl update incremental {}".format(dut_conf_file_path))
@pytest.mark.reboot
class TestAclWithReboot(TestBasicAcl):
"""Test ACL rule functionality with a reboot.
Verify that configuration persists correctly after reboot and is applied properly
upon startup.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
dut.command("config save -y")
reboot(dut, localhost, wait=240)
# We need some additional delay on e1031
if dut.facts["platform"] == "x86_64-cel_e1031-r0":
time.sleep(240)
populate_vlan_arp_entries()
@pytest.mark.port_toggle
class TestAclWithPortToggle(TestBasicAcl):
"""Test ACL rule functionality after toggling ports.
Verify that ACLs still function as expected after links flap.
"""
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
"""Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
"""
port_toggle(dut, tbinfo)
populate_vlan_arp_entries()
|
compute_ade
|
Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
# MASKED: compute_ade function (lines 9-22)
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
|
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
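# Usage sketch (illustrative addition, not part of the original module): with
# K=2 candidate trajectories of N=3 timestamps each, compute_ade returns one
# average displacement error per candidate.
import numpy as np
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])   # (N, 2) ground truth
preds = np.stack([gt, gt + np.array([0.0, 1.0])])     # (K, N, 2) candidates
ade = compute_ade(preds, gt)
# The first candidate matches exactly (ADE 0.0); the second is offset by 1 m
# at every timestamp (ADE 1.0).
assert np.allclose(ade, [0.0, 1.0])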
| 9 | 22 |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
|
compute_fde
|
Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
# MASKED: compute_fde function (lines 25-38)
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
|
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
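# Usage sketch (illustrative addition, not part of the original module): FDE
# only evaluates displacement at the final timestamp of each candidate.
import numpy as np
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])   # (N, 2) ground truth
preds = np.stack([gt, gt + np.array([0.0, 3.0])])     # second ends 3 m away
fde = compute_fde(preds, gt)
assert np.allclose(fde, [0.0, 3.0])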
| 25 | 38 |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
|
compute_is_missed_prediction
|
Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
|
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
# MASKED: compute_is_missed_prediction function (lines 41-58)
|
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
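# Usage sketch (illustrative addition, not part of the original module): with
# the default miss_threshold_m of 2.0, only candidates whose endpoint lies
# more than 2 m from the ground-truth endpoint count as misses.
import numpy as np
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
preds = np.stack([gt + np.array([0.0, 1.0]), gt + np.array([0.0, 5.0])])
missed = compute_is_missed_prediction(preds, gt)
assert missed.tolist() == [False, True]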
| 41 | 58 |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
"""
# (K,N)
displacement_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2) # type: ignore
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
"""Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
"""
# Compute final displacement error for all K trajectories
fde_vector = (forecasted_trajectories - gt_trajectory)[:, -1] # type: ignore
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=-1) # type: ignore
return fde
def compute_is_missed_prediction(
forecasted_trajectories: NDArrayNumber,
gt_trajectory: NDArrayNumber,
miss_threshold_m: float = 2.0,
) -> NDArrayBool:
"""Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
"""
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = fde > miss_threshold_m # type: ignore
return is_missed_prediction
|
undo_logger_setup
|
Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
|
import logging
import sys
import gym
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
requests_logger = logging.getLogger('requests')
# Set up the default handler
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
# We need to take in the gym logger explicitly since this is called
# at initialization time.
def logger_setup(gym_logger):
root_logger.addHandler(handler)
gym_logger.setLevel(logging.INFO)
# When set to INFO, this will print out the hostname of every
# connection it makes.
# requests_logger.setLevel(logging.WARN)
# MASKED: undo_logger_setup function (lines 25-37)
|
def undo_logger_setup():
"""Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
"""
root_logger.removeHandler(handler)
gym.logger.setLevel(logging.NOTSET)
requests_logger.setLevel(logging.NOTSET)
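# Usage sketch (illustrative addition, assuming an older gym release that
# still exposes undo_logger_setup): disable gym's automatic handler and then
# configure logging manually, exactly as the docstring above suggests.
import logging
import sys
import gym
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
logger.setLevel(logging.INFO)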
| 25 | 37 |
import logging
import sys
import gym
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
requests_logger = logging.getLogger('requests')
# Set up the default handler
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
# We need to take in the gym logger explicitly since this is called
# at initialization time.
def logger_setup(gym_logger):
root_logger.addHandler(handler)
gym_logger.setLevel(logging.INFO)
# When set to INFO, this will print out the hostname of every
# connection it makes.
# requests_logger.setLevel(logging.WARN)
def undo_logger_setup():
"""Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
"""
root_logger.removeHandler(handler)
gym.logger.setLevel(logging.NOTSET)
requests_logger.setLevel(logging.NOTSET)
|
_predict_var
|
predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
|
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
the mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
raise ValueError('exog and exog_infl have different number of '
'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
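# Worked example (illustrative, not part of the original source): for a zero
# observation with inflation probability w = 0.25 and main-model
# log-likelihood llf_main = -2.0, the mixture term is
#     llf = log(0.25 + 0.75 * exp(-2.0)) = log(0.3515) ≈ -1.045,
# while a nonzero observation with the same w contributes
#     llf = log(1 - 0.25) + llf_main ≈ -0.288 - 2.0 = -2.288.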
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
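# Usage sketch (illustrative, not from the original source): with a fitted
# zero-inflated results instance ``res``, e.g.
#     res = ZeroInflatedPoisson(y, X, exog_infl=Z, inflation="logit").fit()
# the statistics above are obtained via
#     res.predict(which="mean")       # E(y | x), zero inflation included
#     res.predict(which="mean-main")  # mean of the count model only
#     res.predict(which="prob-zero")  # P(y = 0 | x)
#     res.predict(which="prob", y_values=np.arange(5))  # pmf over counts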
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
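# E(y | x) = (1 - w) * mu, so the product rule splits the derivative into an
# inflation part, -mu * dw/dparams_infl, and a main-model part,
# (1 - w) * dmu/dparams_main.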
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
# MASKED: _predict_var function (lines 665-684)
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
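# Worked example (illustrative, not part of the original source): with the
# internal parameterization p = 1, alpha = 0.5, mu = 2.0 and inflation
# probability w = 0.2:
#     var = 0.8 * 2.0 * ((1 + 0.5 * 2.0)**2 + 0.2 * 2.0)
#         = 0.8 * 2.0 * (4.0 + 0.4) = 7.04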
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
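# Worked example (illustrative, not part of the original source): for NB2
# (p = 2 here), alpha = 0.5, mu = 2.0 and inflation probability w = 0.2:
#     var = 0.8 * 2.0 * (1 + 0.5 * 2.0**(2 - 1) + 0.2 * 2.0)
#         = 0.8 * 2.0 * (1 + 1.0 + 0.4) = 3.84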
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in the `hat_matrix_diag`
attribute, and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
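A minimal usage sketch for the classes defined above (illustrative only, not part of the module): it simulates a zero-inflated Poisson sample and fits ZeroInflatedPoisson with a constant-only inflation model. The data, seed, and variable names are assumptions made for the example.

import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedPoisson

# Simulate counts that are forced to zero with probability true_w and are
# Poisson(exp(x @ beta)) otherwise.
rng = np.random.default_rng(12345)
n = 1000
x = np.column_stack([np.ones(n), rng.normal(size=n)])
beta = np.array([0.5, 0.3])
true_w = 0.25
y = rng.poisson(np.exp(x @ beta)) * (rng.uniform(size=n) > true_w)

# exog_infl=None gives a constant-only inflation model, with a logit link by default.
model = ZeroInflatedPoisson(y, x, exog_infl=None, inflation='logit')
res = model.fit(disp=0)
print(res.params)                                         # inflation parameter first, then main-model parameters
print(model.predict(res.params, which='prob-zero')[:5])   # P(y=0 | x) for the first observations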
|
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
        prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
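The formula above is the usual zero-inflated Poisson conditional variance, Var(y | x) = (1 - w) * mu * (1 + w * mu). A small stand-alone check (illustrative only, not part of the module) recovers the same value directly from the mixture pmf:

import numpy as np
from scipy.stats import poisson

mu, w = 3.0, 0.2
k = np.arange(200)

# Mixture pmf: an extra point mass w at zero plus (1 - w) times a Poisson(mu) pmf.
pmf = (1 - w) * poisson.pmf(k, mu)
pmf[0] += w

mean = np.sum(k * pmf)                   # equals (1 - w) * mu
var = np.sum(k ** 2 * pmf) - mean ** 2   # variance from the pmf (truncation error is negligible here)
assert np.isclose(var, (1 - w) * mu * (1 + w * mu))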
| 665 | 684 |
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
            raise ValueError('exog and exog_infl have different number of '
                             'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
            Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
            Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
        prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
            Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
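# Illustrative note (not part of statsmodels): with a Poisson main model the
# "prob-zero" prediction in GenericZeroInflated.predict reduces to
#     P(y = 0 | x) = w + (1 - w) * exp(-mu)
# because the Poisson log-likelihood at y = 0 equals -mu. A hypothetical helper
# expressing that identity:
def _example_zip_prob_zero(mu, w):
    return w + (1 - w) * np.exp(-mu)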
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
        prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
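# Note added for clarity (not in statsmodels): GeneralizedPoisson stores its
# dispersion power internally as ``parameterization = p - 1``, which is why the
# ``+ 1`` appears in ``_get_init_kwds`` and ``_predict_prob`` above, whereas
# NegativeBinomialP (used by the class below) stores ``parameterization = p``
# directly, so no offset is needed there.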
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
    ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
        prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
        attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
        # same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
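As a cross-check of the analytic score implementation above, the gradient can be compared against a numerical derivative of the log-likelihood using approx_fprime, which the module already imports. A rough, self-contained sketch (illustrative only; the simulated data and variable names are assumptions, and _get_start_params is the private helper shown in the source above):

import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedPoisson
from statsmodels.tools.numdiff import approx_fprime

rng = np.random.default_rng(0)
n = 500
x = np.column_stack([np.ones(n), rng.normal(size=n)])
y = rng.poisson(np.exp(x @ np.array([0.4, 0.2]))) * (rng.uniform(size=n) > 0.3)

model = ZeroInflatedPoisson(y, x)
params = model._get_start_params()           # crude starting values
analytic = model.score(params)
numeric = approx_fprime(params, model.loglike, centered=True)
print(np.max(np.abs(analytic - numeric)))    # should be close to zero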
|
_predict_var
|
predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
        prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
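    Notes
    -----
    The formula follows from the law of total variance for the zero-inflated
    mixture: with inflation probability w and main-model mean mu,
    E(endog | exog) = (1 - w) * mu and
    Var(endog | exog) = (1 - w) * var_main + w * (1 - w) * mu**2.
    For a Poisson main model var_main = mu, which gives
    (1 - w) * mu * (1 + w * mu); the generalized Poisson and negative binomial
    variants substitute their own main-model variance.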
|
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
            raise ValueError('exog and exog_infl have different number of '
                             'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
            Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
            Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
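# Sanity-check sketch (added, illustrative values only): for w = 0.3 and
# mu = 2.0 the formula above gives (1 - 0.3) * 2.0 * (1 + 0.3 * 2.0) = 2.24,
# which a quick Monte Carlo draw should reproduce approximately:
#   >>> w, mu = 0.3, 2.0
#   >>> y = np.where(np.random.rand(10**6) < w, 0,
#   ...              np.random.poisson(mu, 10**6))
#   >>> y.var()    # ~2.24;  y.mean() ~ (1 - w) * mu = 1.4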
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
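# Usage sketch (added; hypothetical names, assuming `res` is a fitted result
# of this model): the frozen distribution vectorizes over observations.
#   >>> distr = res.model.get_distribution(res.params)
#   >>> distr.pmf(0)    # per-observation P(y = 0 | x)
#   >>> distr.mean()    # per-observation E[y | x], should match res.predict()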
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
# MASKED: _predict_var function (lines 803-824)
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_
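# Derivation note (added): the NB-P count part has variance
# mu * (1 + alpha * mu**(p - 1)); mixing in structural zeros with probability
# w gives
#     Var(y) = (1 - w) * (var_count + w * mu**2)
#            = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu),
# which is the expression returned above.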
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
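# Usage sketch (added, illustrative; exact attribute names follow statsmodels'
# delta-method prediction results and may differ across versions):
#   >>> pred = res.get_prediction(which="prob-zero")
#   >>> pred.summary_frame()   # point predictions plus confidence intervals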
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
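# The function below is an illustrative, hypothetical helper (not part of the
# statsmodels API): a minimal sketch that simulates zero-inflated Poisson data
# and fits ZeroInflatedPoisson with a constant-only inflation model. The
# variable names (rng, X, y, res) are assumptions made for the example.
def _example_fit_zero_inflated_poisson():
    """Minimal usage sketch for the classes defined above."""
    rng = np.random.default_rng(0)
    n = 500
    X = np.column_stack([np.ones(n), rng.normal(size=n)])
    mu = np.exp(X @ np.array([0.5, 0.3]))      # Poisson mean of the count part
    structural_zero = rng.random(n) < 0.25     # ~25% excess zeros
    y = np.where(structural_zero, 0, rng.poisson(mu))
    res = ZeroInflatedPoisson(y, X).fit(disp=0)
    # res.params: inflation coefficients first, then the count-model coefficients
    return res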
|
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_
| 803 | 824 |
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
raise ValueError('exog and exog_infl have different number of '
'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
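# Worked sketch (added, illustrative numbers): with inflation probability
# w = 0.3 and a Poisson count part with mean mu = 2.0, the two branches above
# evaluate to
#     y = 0:  log(0.3 + 0.7 * exp(-2.0)) ≈ -0.930
#     y = 3:  log(0.7) + log(exp(-2.0) * 2.0**3 / 6) ≈ -0.357 - 1.712 = -2.069
# i.e. zeros mix the structural and count sources, while positive counts only
# rescale the count-model likelihood by (1 - w).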
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
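# Descriptive note (added): the analytic pieces are assembled into one
# (k_inflate + k_exog) square matrix; the inflation rows come from
# _hessian_inflate and the count block from _hessian_main, and the upper
# triangle is then overwritten from the transpose so the returned matrix is
# symmetric. If either analytic piece is unavailable (e.g. probit inflation),
# the whole Hessian falls back to numerical differentiation via approx_hess.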
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
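# Usage sketch (added, illustrative; assumes `res` is a fitted result of a
# subclass such as ZeroInflatedPoisson):
#   >>> res.predict(which="mean")        # E[y | x] = P(main) * exp(x'b)
#   >>> res.predict(which="prob-zero")   # P(y = 0 | x), structural plus count zeros
#   >>> res.predict(which="prob", y_values=np.arange(5))  # P(y = k | x), k = 0..4
# the last call returns a 2-dim array with one column per value in y_values.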
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_
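# Derivation note (added): the generalized Poisson count part has variance
# mu * (1 + alpha * mu**p)**2, where p = parameterization (the ZIGP power
# minus 1); mixing in structural zeros with probability w gives
#     Var(y) = (1 - w) * (var_count + w * mu**2)
#            = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu),
# which is the expression returned above.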
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
|
_predict_var
|
predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
|
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
raise ValueError('exog and exog_infl have different number of '
'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
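Because the analytic score above only covers the logit inflation case (the probit branch already falls back to numerical differentiation), it can be useful to cross-check it against a numerical gradient. A minimal sketch, assuming the public statsmodels API; the simulated data and the parameter vector are made up for illustration:

import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedPoisson
from statsmodels.tools.numdiff import approx_fprime

# Simulated Poisson counts with extra zeros (values are arbitrary).
rng = np.random.default_rng(0)
n = 500
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
endog = rng.poisson(np.exp(exog @ np.array([0.5, 0.3])))
endog[rng.random(n) < 0.2] = 0

model = ZeroInflatedPoisson(endog, exog)
params = np.array([-1.0, 0.4, 0.2])          # [inflate_const, const, x1]
analytic = model.score(params)               # sum of score_obs over observations
numeric = approx_fprime(params, model.loglike, centered=True)
print(np.max(np.abs(analytic - numeric)))    # should be small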
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
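A short usage sketch for the class above (not taken from the statsmodels docs; the simulated data are purely illustrative):

import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.count_model import ZeroInflatedPoisson

rng = np.random.default_rng(123)
x = rng.normal(size=1000)
exog = sm.add_constant(x)
y = rng.poisson(np.exp(0.2 + 0.5 * x))
y[rng.random(1000) < 0.3] = 0                # inject zero inflation

res = ZeroInflatedPoisson(y, exog).fit(disp=0)
print(res.params)                            # [inflate_const, const, x1]
print(res.predict(which="prob-zero")[:5])    # P(y = 0 | x) per observation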
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
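One detail worth flagging in the class above: GeneralizedPoisson stores `parameterization = p - 1` internally, which is why `_get_init_kwds` and `_predict_prob` add one back. A small hypothetical illustration (the tiny data set exists only so the constructor runs):

import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedGeneralizedPoisson

y = np.array([0, 0, 1, 2, 0, 3, 1, 0])
X = np.ones((len(y), 1))
m = ZeroInflatedGeneralizedPoisson(y, X, p=2)
print(m.model_main.parameterization)   # 1, i.e. p - 1
print(m._get_init_kwds()["p"])         # 2, the user-facing value restored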
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
# MASKED: _predict_var function (lines 919-940)
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
|
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_
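The formula above is the standard mixture-variance identity specialised to an NB-P count component: with inflation probability w and a count part with mean mu and variance mu + alpha*mu**p, Var(Y) = (1 - w)*mu*(1 + alpha*mu**(p - 1) + w*mu). A quick Monte Carlo sanity check with arbitrary values; the gamma-Poisson simulation of the NB component is an assumption of this sketch, not part of the original code:

import numpy as np

rng = np.random.default_rng(0)
mu, alpha, p, w = 3.0, 0.5, 2, 0.25
# Simulate the NB-P component via its gamma-Poisson mixture:
# shape k chosen so that the NB variance equals mu + alpha * mu**p.
k = mu**(2 - p) / alpha
lam = rng.gamma(k, mu / k, size=1_000_000)
y = rng.poisson(lam)
y[rng.random(y.size) < w] = 0            # apply zero inflation
analytic = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
print(y.var(), analytic)                 # the two should be close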
| 919 | 940 |
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
raise ValueError('exog and exog_infl have different number of '
'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def _get_exogs(self):
"""list of exogs, for internal use in post-estimation
"""
return (self.exog, self.exog_infl)
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P is the pdf of the main model and L is the loglikelihood of the main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P is the pdf of the main model and L is the loglikelihood of the main model,
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 2-D
The score vector of the model for each observation, i.e. the first
derivative of the loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
# TODO: need to allow for complex to use CS numerical derivatives
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', y_values=None):
"""
Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"``
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
#if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':
if tmp_offset is False:
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
#if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
if tmp_exposure is False:
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'var':
mu = np.exp(lin_pred)
return self._predict_var(params, mu, 1 - prob_main)
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure,
offset, y_values=y_values)
else:
raise ValueError('which = %s is not available' % which)
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""NotImplemented
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""NotImplemented
"""
raise NotImplementedError
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = - mu[:, None] * score_infl
dmat_main = (1 - w[:, None]) * score_main
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
raise NotImplementedError
# The below currently does not work, discontinuity at zero
# see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)
return ds
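The per-observation likelihood implemented in loglikeobs above is a two-part mixture: a zero can come either from the inflation point mass (probability w) or from the count model, while a positive count must come from the count model. A hand-computed check for the Poisson case, with arbitrary numbers:

import numpy as np
from scipy import stats

w, mu = 0.3, 2.0                                            # inflation probability, Poisson mean
ll_zero = np.log(w + (1 - w) * stats.poisson.pmf(0, mu))    # contribution of an observed y = 0
ll_two = np.log(1 - w) + stats.poisson.logpmf(2, mu)        # contribution of an observed y = 2
print(ll_zero, ll_two)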
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
w = prob_infl
var_ = (1 - w) * mu * (1 + w * mu)
return var_
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
"""Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass.
"""
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], 1 - w[:, None])
return distr
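A hedged sketch of using the frozen distribution returned by get_distribution; it assumes the statsmodels zipoisson distribution exposes the usual scipy-style pmf, and the data are simulated for illustration only:

import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.count_model import ZeroInflatedPoisson

rng = np.random.default_rng(2)
x = rng.normal(size=400)
exog = sm.add_constant(x)
y = rng.poisson(np.exp(0.3 * x))
y[rng.random(400) < 0.25] = 0

res = ZeroInflatedPoisson(y, exog).fit(disp=0)
distr = res.model.get_distribution(res.params)
# P(Y = 0) from the frozen distribution should agree with the 'prob-zero' prediction.
print(np.allclose(distr.pmf(0).squeeze(), res.predict(which="prob-zero")))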
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization + 1
if y_values is None:
y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization + 1
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset,
y_values=None):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
if y_values is None:
y_values = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)
return result[0] if transform else result
def _predict_var(self, params, mu, prob_infl):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_inlf : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance.
"""
alpha = params[-1]
w = prob_infl
p = self.model_main.parameterization
var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)
return var_
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
@Appender(ZeroInflatedPoisson.get_distribution.__doc__)
def get_distribution(self, params, exog=None, exog_infl=None,
exposure=None, offset=None):
p = self.model_main.parameterization
mu = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="mean-main")
w = self.predict(params, exog=exog, exog_infl=exog_infl,
exposure=exposure, offset=offset, which="prob-main")
distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])
return distr
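A small hedged example of the multivariate 'prob' prediction for the class above; the negative binomial data are simulated via numpy's negative_binomial (an assumption of this sketch), and the fit options are arbitrary:

import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.count_model import ZeroInflatedNegativeBinomialP

rng = np.random.default_rng(3)
x = rng.normal(size=600)
exog = sm.add_constant(x)
mu = np.exp(0.2 + 0.3 * x)
y = rng.negative_binomial(2, 2 / (2 + mu))   # NB2 with mean mu, alpha = 0.5
y[rng.random(600) < 0.2] = 0

res = ZeroInflatedNegativeBinomialP(y, exog, p=2).fit(disp=0, maxiter=200)
probs = res.predict(which="prob")            # shape (nobs, max(endog) + 1)
print(probs.shape, probs.sum(axis=1)[:3])    # rows sum to roughly one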
class ZeroInflatedResults(CountResults):
def get_prediction(self, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean', average=False,
agg_weights=None, y_values=None,
transform=True, row_labels=None):
import statsmodels.base._prediction_inference as pred
pred_kwds = {
'exog_infl': exog_infl,
'exposure': exposure,
'offset': offset,
'y_values': y_values,
}
res = pred.get_prediction_delta(self, exog=exog, which=which,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds)
return res
def get_influence(self):
"""
Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account.
"""
# same as super in DiscreteResults, only added for docstring
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self)
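A hedged sketch of the delta-method prediction interface defined above; it assumes the returned prediction results expose summary_frame(), and the fitted model is simulated just as in the earlier sketches:

import numpy as np
import statsmodels.api as sm
from statsmodels.discrete.count_model import ZeroInflatedPoisson

rng = np.random.default_rng(4)
x = rng.normal(size=400)
exog = sm.add_constant(x)
y = rng.poisson(np.exp(0.2 * x))
y[rng.random(400) < 0.2] = 0

res = ZeroInflatedPoisson(y, exog).fit(disp=0)
pred = res.get_prediction(which="mean", average=True)   # average predicted mean
print(pred.summary_frame())                             # point estimate, std err, CI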
class ZeroInflatedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
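# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It fits a
# zero-inflated Poisson model on simulated counts with injected excess zeros;
# the data, seed and settings are hypothetical. Assumes `np` is the numpy
# import used elsewhere in this module and that `ZeroInflatedPoisson` is
# defined earlier in the file.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n = 500
    x = rng.normal(size=n)
    exog = np.column_stack([np.ones(n), x])
    y = rng.poisson(np.exp(0.5 + 0.8 * x))
    y[rng.uniform(size=n) < 0.3] = 0  # overwrite ~30% of observations with zeros
    res = ZeroInflatedPoisson(y, exog).fit(disp=0)
    print(res.params)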
|
reproduce
|
Reproduce the specified experiments.
Args:
revs: If revs is not specified, all stashed experiments will be
reproduced.
keep_stash: If True, stashed experiments will be preserved if they
fail to reproduce successfully.
|
import logging
import os
import re
import signal
from collections import defaultdict, namedtuple
from concurrent.futures import CancelledError, ProcessPoolExecutor, wait
from contextlib import contextmanager
from functools import wraps
from multiprocessing import Manager
from typing import Iterable, Mapping, Optional
from funcy import cached_property, first
from dvc.dvcfile import is_lock_file
from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.stage.run import CheckpointKilledError
from dvc.utils import relpath
from .base import (
EXEC_BASELINE,
EXEC_CHECKPOINT,
EXEC_HEAD,
EXEC_MERGE,
EXEC_NAMESPACE,
EXPS_NAMESPACE,
EXPS_STASH,
BaselineMismatchError,
CheckpointExistsError,
ExperimentExistsError,
ExpRefInfo,
MultipleBranchError,
)
from .executor import BaseExecutor, LocalExecutor
from .utils import exp_refs_by_rev
logger = logging.getLogger(__name__)
def scm_locked(f):
# Lock the experiments workspace so that we don't try to perform two
# different sequences of git operations at once
@wraps(f)
def wrapper(exp, *args, **kwargs):
with exp.scm_lock:
return f(exp, *args, **kwargs)
return wrapper
class Experiments:
"""Class that manages experiments in a DVC repo.
Args:
repo (dvc.repo.Repo): repo instance that these experiments belong to.
"""
STASH_EXPERIMENT_FORMAT = "dvc-exp:{rev}:{baseline_rev}:{name}"
STASH_EXPERIMENT_RE = re.compile(
r"(?:commit: )"
r"dvc-exp:(?P<rev>[0-9a-f]+):(?P<baseline_rev>[0-9a-f]+)"
r":(?P<name>[^~^:\\?\[\]*]*)"
r"(:(?P<branch>.+))?$"
)
BRANCH_RE = re.compile(
r"^(?P<baseline_rev>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)"
r"(?P<checkpoint>-checkpoint)?$"
)
LAST_CHECKPOINT = ":last"
StashEntry = namedtuple(
"StashEntry", ["index", "rev", "baseline_rev", "branch", "name"]
)
def __init__(self, repo):
from dvc.lock import make_lock
from dvc.scm.base import NoSCMError
if repo.config["core"].get("no_scm", False):
raise NoSCMError
self.repo = repo
self.scm_lock = make_lock(
os.path.join(self.repo.tmp_dir, "exp_scm_lock"),
tmp_dir=self.repo.tmp_dir,
)
@property
def scm(self):
return self.repo.scm
@cached_property
def dvc_dir(self):
return relpath(self.repo.dvc_dir, self.repo.scm.root_dir)
@contextmanager
def chdir(self):
yield
@cached_property
def args_file(self):
return os.path.join(self.repo.tmp_dir, BaseExecutor.PACKED_ARGS_FILE)
@cached_property
def stash(self):
from dvc.scm.git import Stash
return Stash(self.scm, EXPS_STASH)
@property
def stash_revs(self):
revs = {}
for i, entry in enumerate(self.stash):
msg = entry.message.decode("utf-8").strip()
m = self.STASH_EXPERIMENT_RE.match(msg)
if m:
revs[entry.new_sha.decode("utf-8")] = self.StashEntry(
i,
m.group("rev"),
m.group("baseline_rev"),
m.group("branch"),
m.group("name"),
)
return revs
def _stash_exp(
self,
*args,
params: Optional[dict] = None,
detach_rev: Optional[str] = None,
baseline_rev: Optional[str] = None,
branch: Optional[str] = None,
name: Optional[str] = None,
**kwargs,
):
"""Stash changes from the workspace as an experiment.
Args:
params: Optional dictionary of parameter values to be used.
Values take priority over any parameters specified in the
user's workspace.
baseline_rev: Optional baseline rev for this experiment, defaults
to the current SCM rev.
branch: Optional experiment branch name. If specified, the
experiment will be added to `branch` instead of creating
a new branch.
name: Optional experiment name. If specified this will be used as
the human-readable name in the experiment branch ref. Has no
                effect if branch is specified.
"""
with self.scm.stash_workspace(
include_untracked=detach_rev or branch
) as workspace:
# If we are not extending an existing branch, apply current
# workspace changes to be made in new branch
if not (branch or detach_rev) and workspace:
self.stash.apply(workspace)
self._prune_lockfiles()
# checkout and detach at branch (or current HEAD)
if detach_rev:
head = detach_rev
elif branch:
head = branch
else:
head = None
with self.scm.detach_head(head) as rev:
if baseline_rev is None:
baseline_rev = rev
# update experiment params from command line
if params:
self._update_params(params)
# save additional repro command line arguments
self._pack_args(*args, **kwargs)
# save experiment as a stash commit
msg = self._stash_msg(
rev, baseline_rev=baseline_rev, branch=branch, name=name
)
stash_rev = self.stash.push(message=msg)
logger.debug(
(
"Stashed experiment '%s' with baseline '%s' "
"for future execution."
),
stash_rev[:7],
baseline_rev[:7],
)
# Reset any changes before prior workspace is unstashed
self.scm.reset(hard=True)
return stash_rev
def _prune_lockfiles(self):
# NOTE: dirty DVC lock files must be restored to index state to
# avoid checking out incorrect persist or checkpoint outs
tree = self.scm.get_tree("HEAD")
lock_files = [
str(fname)
for fname in tree.walk_files(self.scm.root_dir)
if is_lock_file(fname)
]
if lock_files:
self.scm.reset(paths=lock_files)
self.scm.checkout_paths(lock_files, force=True)
def _stash_msg(
self,
rev: str,
baseline_rev: str,
branch: Optional[str] = None,
name: Optional[str] = None,
):
if not baseline_rev:
baseline_rev = rev
msg = self.STASH_EXPERIMENT_FORMAT.format(
rev=rev, baseline_rev=baseline_rev, name=name if name else ""
)
if branch:
return f"{msg}:{branch}"
return msg
def _pack_args(self, *args, **kwargs):
BaseExecutor.pack_repro_args(self.args_file, *args, **kwargs)
self.scm.add(self.args_file)
def _update_params(self, params: dict):
"""Update experiment params files with the specified values."""
from benedict import benedict
from dvc.utils.serialize import MODIFIERS
logger.debug("Using experiment params '%s'", params)
for params_fname in params:
path = PathInfo(self.repo.root_dir) / params_fname
suffix = path.suffix.lower()
modify_data = MODIFIERS[suffix]
with modify_data(path, tree=self.repo.tree) as data:
benedict(data).merge(params[params_fname], overwrite=True)
# Force params file changes to be staged in git
# Otherwise in certain situations the changes to params file may be
# ignored when we `git stash` them since mtime is used to determine
# whether the file is dirty
self.scm.add(list(params.keys()))
def reproduce_one(self, queue=False, **kwargs):
"""Reproduce and checkout a single experiment."""
stash_rev = self.new(**kwargs)
if queue:
logger.info(
"Queued experiment '%s' for future execution.", stash_rev[:7]
)
return [stash_rev]
results = self.reproduce([stash_rev], keep_stash=False)
exp_rev = first(results)
if exp_rev is not None:
self._log_reproduced(results)
return results
def reproduce_queued(self, **kwargs):
results = self.reproduce(**kwargs)
if results:
self._log_reproduced(results)
return results
def _log_reproduced(self, revs: Iterable[str]):
names = []
for rev in revs:
name = self.get_exact_name(rev)
names.append(name if name else rev[:7])
fmt = (
"\nReproduced experiment(s): %s\n"
"To promote an experiment to a Git branch run:\n\n"
"\tdvc exp branch <exp>\n\n"
"To apply the results of an experiment to your workspace run:\n\n"
"\tdvc exp apply <exp>"
)
logger.info(fmt, ", ".join(names))
@scm_locked
def new(
self, *args, checkpoint_resume: Optional[str] = None, **kwargs,
):
"""Create a new experiment.
Experiment will be reproduced and checked out into the user's
workspace.
"""
if checkpoint_resume is not None:
return self._resume_checkpoint(
*args, checkpoint_resume=checkpoint_resume, **kwargs
)
return self._stash_exp(*args, **kwargs)
def _resume_checkpoint(
self, *args, checkpoint_resume: Optional[str] = None, **kwargs,
):
"""Resume an existing (checkpoint) experiment.
Experiment will be reproduced and checked out into the user's
workspace.
"""
assert checkpoint_resume
if checkpoint_resume == self.LAST_CHECKPOINT:
# Continue from most recently committed checkpoint
resume_rev = self._get_last_checkpoint()
else:
resume_rev = self.scm.resolve_rev(checkpoint_resume)
allow_multiple = "params" in kwargs
branch = self.get_branch_by_rev(
resume_rev, allow_multiple=allow_multiple
)
if not branch:
raise DvcException(
"Could not find checkpoint experiment "
f"'{checkpoint_resume}'"
)
baseline_rev = self._get_baseline(branch)
if kwargs.get("params", None):
logger.debug(
"Branching from checkpoint '%s' with modified params, "
"baseline '%s'",
checkpoint_resume,
baseline_rev[:7],
)
detach_rev = resume_rev
branch = None
else:
logger.debug(
"Continuing from tip of checkpoint '%s'", checkpoint_resume
)
detach_rev = None
return self._stash_exp(
*args,
detach_rev=detach_rev,
baseline_rev=baseline_rev,
branch=branch,
**kwargs,
)
def _get_last_checkpoint(self):
rev = self.scm.get_ref(EXEC_CHECKPOINT)
if rev:
return rev
raise DvcException("No existing checkpoint experiment to continue")
# MASKED: reproduce function (lines 357-419)
def _init_executors(self, to_run):
executors = {}
for stash_rev, item in to_run.items():
self.scm.set_ref(EXEC_HEAD, item.rev)
self.scm.set_ref(EXEC_MERGE, stash_rev)
self.scm.set_ref(EXEC_BASELINE, item.baseline_rev)
# Executor will be initialized with an empty git repo that
# we populate by pushing:
# EXEC_HEAD - the base commit for this experiment
# EXEC_MERGE - the unmerged changes (from our stash)
# to be reproduced
# EXEC_BASELINE - the baseline commit for this experiment
executor = LocalExecutor(
self.scm,
self.dvc_dir,
name=item.name,
branch=item.branch,
cache_dir=self.repo.cache.local.cache_dir,
)
executors[item.rev] = executor
for ref in (EXEC_HEAD, EXEC_MERGE, EXEC_BASELINE):
self.scm.remove_ref(ref)
return executors
def _reproduce(
self, executors: dict, jobs: Optional[int] = 1
) -> Mapping[str, Mapping[str, str]]:
"""Run dvc repro for the specified BaseExecutors in parallel.
Returns dict containing successfully executed experiments.
"""
result = defaultdict(dict)
manager = Manager()
pid_q = manager.Queue()
with ProcessPoolExecutor(max_workers=jobs) as workers:
futures = {}
for rev, executor in executors.items():
future = workers.submit(
executor.reproduce,
executor.dvc_dir,
pid_q,
rev,
name=executor.name,
)
futures[future] = (rev, executor)
try:
wait(futures)
except KeyboardInterrupt:
# forward SIGINT to any running executor processes and
# cancel any remaining futures
pids = {}
while not pid_q.empty():
rev, pid = pid_q.get()
pids[rev] = pid
for future, (rev, _) in futures.items():
if future.running():
os.kill(pids[rev], signal.SIGINT)
elif not future.done():
future.cancel()
for future, (rev, executor) in futures.items():
rev, executor = futures[future]
exc = future.exception()
try:
if exc is None:
exp_hash, force = future.result()
result[rev].update(
self._collect_executor(executor, exp_hash, force)
)
else:
# Checkpoint errors have already been logged
if not isinstance(exc, CheckpointKilledError):
logger.exception(
"Failed to reproduce experiment '%s'",
rev[:7],
exc_info=exc,
)
except CancelledError:
logger.error(
"Cancelled before attempting to reproduce experiment "
"'%s'",
rev[:7],
)
finally:
executor.cleanup()
return result
def _collect_executor(
self, executor, exp_hash, force
) -> Mapping[str, str]:
# NOTE: GitPython Repo instances cannot be re-used
# after process has received SIGINT or SIGTERM, so we
# need this hack to re-instantiate git instances after
# checkpoint runs. See:
# https://github.com/gitpython-developers/GitPython/issues/427
del self.repo.scm
results = {}
def on_diverged(ref: str, checkpoint: bool):
ref_info = ExpRefInfo.from_ref(ref)
if checkpoint:
raise CheckpointExistsError(ref_info.name)
raise ExperimentExistsError(ref_info.name)
for ref in executor.fetch_exps(
self.scm, force=force, on_diverged=on_diverged,
):
exp_rev = self.scm.get_ref(ref)
if exp_rev:
logger.debug("Collected experiment '%s'.", exp_rev[:7])
results[exp_rev] = exp_hash
return results
def check_baseline(self, exp_rev):
baseline_sha = self.repo.scm.get_rev()
if exp_rev == baseline_sha:
return exp_rev
exp_baseline = self._get_baseline(exp_rev)
if exp_baseline is None:
# if we can't tell from branch name, fall back to parent commit
exp_commit = self.scm.resolve_commit(exp_rev)
if exp_commit:
exp_baseline = first(exp_commit.parents).hexsha
if exp_baseline == baseline_sha:
return exp_baseline
raise BaselineMismatchError(exp_baseline, baseline_sha)
@scm_locked
def get_baseline(self, rev):
"""Return the baseline rev for an experiment rev."""
return self._get_baseline(rev)
def _get_baseline(self, rev):
rev = self.scm.resolve_rev(rev)
if rev in self.stash_revs:
entry = self.stash_revs.get(rev)
if entry:
return entry.baseline_rev
return None
ref_info = first(exp_refs_by_rev(self.scm, rev))
if ref_info:
return ref_info.baseline_sha
return None
def get_branch_by_rev(self, rev: str, allow_multiple: bool = False) -> str:
"""Returns full refname for the experiment branch containing rev."""
ref_infos = list(exp_refs_by_rev(self.scm, rev))
if not ref_infos:
return None
if len(ref_infos) > 1 and not allow_multiple:
raise MultipleBranchError(rev)
return str(ref_infos[0])
def get_exact_name(self, rev: str):
"""Returns preferred name for the specified revision.
        Prefers tags, branches (heads), experiments in that order.
"""
exclude = f"{EXEC_NAMESPACE}/*"
ref = self.scm.describe(rev, base=EXPS_NAMESPACE, exclude=exclude)
if ref:
return ExpRefInfo.from_ref(ref).name
return None
def apply(self, *args, **kwargs):
from dvc.repo.experiments.apply import apply
return apply(self.repo, *args, **kwargs)
def branch(self, *args, **kwargs):
from dvc.repo.experiments.branch import branch
return branch(self.repo, *args, **kwargs)
def diff(self, *args, **kwargs):
from dvc.repo.experiments.diff import diff
return diff(self.repo, *args, **kwargs)
def show(self, *args, **kwargs):
from dvc.repo.experiments.show import show
return show(self.repo, *args, **kwargs)
def run(self, *args, **kwargs):
from dvc.repo.experiments.run import run
return run(self.repo, *args, **kwargs)
def gc(self, *args, **kwargs):
from dvc.repo.experiments.gc import gc
return gc(self.repo, *args, **kwargs)
|
@scm_locked
def reproduce(
self,
revs: Optional[Iterable] = None,
keep_stash: Optional[bool] = True,
**kwargs,
):
"""Reproduce the specified experiments.
Args:
revs: If revs is not specified, all stashed experiments will be
reproduced.
keep_stash: If True, stashed experiments will be preserved if they
fail to reproduce successfully.
"""
stash_revs = self.stash_revs
# to_run contains mapping of:
# input_rev: (stash_index, rev, baseline_rev)
# where input_rev contains the changes to execute (usually a stash
# commit), rev is the original SCM commit to be checked out, and
# baseline_rev is the experiment baseline.
if revs is None:
to_run = dict(stash_revs)
else:
to_run = {
rev: stash_revs[rev]
if rev in stash_revs
else self.StashEntry(None, rev, rev, None, None)
for rev in revs
}
logger.debug(
"Reproducing experiment revs '%s'",
", ".join((rev[:7] for rev in to_run)),
)
executors = self._init_executors(to_run)
exec_results = self._reproduce(executors, **kwargs)
if keep_stash:
# only drop successfully run stashed experiments
to_drop = sorted(
(
stash_revs[rev][0]
for rev in exec_results
if rev in stash_revs
),
reverse=True,
)
else:
# drop all stashed experiments
to_drop = sorted(
(stash_revs[rev][0] for rev in to_run if rev in stash_revs),
reverse=True,
)
for index in to_drop:
self.stash.drop(index)
result = {}
for _, exp_result in exec_results.items():
result.update(exp_result)
return result
| 357 | 419 |
import logging
import os
import re
import signal
from collections import defaultdict, namedtuple
from concurrent.futures import CancelledError, ProcessPoolExecutor, wait
from contextlib import contextmanager
from functools import wraps
from multiprocessing import Manager
from typing import Iterable, Mapping, Optional
from funcy import cached_property, first
from dvc.dvcfile import is_lock_file
from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.stage.run import CheckpointKilledError
from dvc.utils import relpath
from .base import (
EXEC_BASELINE,
EXEC_CHECKPOINT,
EXEC_HEAD,
EXEC_MERGE,
EXEC_NAMESPACE,
EXPS_NAMESPACE,
EXPS_STASH,
BaselineMismatchError,
CheckpointExistsError,
ExperimentExistsError,
ExpRefInfo,
MultipleBranchError,
)
from .executor import BaseExecutor, LocalExecutor
from .utils import exp_refs_by_rev
logger = logging.getLogger(__name__)
def scm_locked(f):
# Lock the experiments workspace so that we don't try to perform two
# different sequences of git operations at once
@wraps(f)
def wrapper(exp, *args, **kwargs):
with exp.scm_lock:
return f(exp, *args, **kwargs)
return wrapper
class Experiments:
"""Class that manages experiments in a DVC repo.
Args:
repo (dvc.repo.Repo): repo instance that these experiments belong to.
"""
STASH_EXPERIMENT_FORMAT = "dvc-exp:{rev}:{baseline_rev}:{name}"
STASH_EXPERIMENT_RE = re.compile(
r"(?:commit: )"
r"dvc-exp:(?P<rev>[0-9a-f]+):(?P<baseline_rev>[0-9a-f]+)"
r":(?P<name>[^~^:\\?\[\]*]*)"
r"(:(?P<branch>.+))?$"
)
BRANCH_RE = re.compile(
r"^(?P<baseline_rev>[a-f0-9]{7})-(?P<exp_sha>[a-f0-9]+)"
r"(?P<checkpoint>-checkpoint)?$"
)
LAST_CHECKPOINT = ":last"
StashEntry = namedtuple(
"StashEntry", ["index", "rev", "baseline_rev", "branch", "name"]
)
def __init__(self, repo):
from dvc.lock import make_lock
from dvc.scm.base import NoSCMError
if repo.config["core"].get("no_scm", False):
raise NoSCMError
self.repo = repo
self.scm_lock = make_lock(
os.path.join(self.repo.tmp_dir, "exp_scm_lock"),
tmp_dir=self.repo.tmp_dir,
)
@property
def scm(self):
return self.repo.scm
@cached_property
def dvc_dir(self):
return relpath(self.repo.dvc_dir, self.repo.scm.root_dir)
@contextmanager
def chdir(self):
yield
@cached_property
def args_file(self):
return os.path.join(self.repo.tmp_dir, BaseExecutor.PACKED_ARGS_FILE)
@cached_property
def stash(self):
from dvc.scm.git import Stash
return Stash(self.scm, EXPS_STASH)
@property
def stash_revs(self):
revs = {}
for i, entry in enumerate(self.stash):
msg = entry.message.decode("utf-8").strip()
m = self.STASH_EXPERIMENT_RE.match(msg)
if m:
revs[entry.new_sha.decode("utf-8")] = self.StashEntry(
i,
m.group("rev"),
m.group("baseline_rev"),
m.group("branch"),
m.group("name"),
)
return revs
def _stash_exp(
self,
*args,
params: Optional[dict] = None,
detach_rev: Optional[str] = None,
baseline_rev: Optional[str] = None,
branch: Optional[str] = None,
name: Optional[str] = None,
**kwargs,
):
"""Stash changes from the workspace as an experiment.
Args:
params: Optional dictionary of parameter values to be used.
Values take priority over any parameters specified in the
user's workspace.
baseline_rev: Optional baseline rev for this experiment, defaults
to the current SCM rev.
branch: Optional experiment branch name. If specified, the
experiment will be added to `branch` instead of creating
a new branch.
name: Optional experiment name. If specified this will be used as
the human-readable name in the experiment branch ref. Has no
                effect if branch is specified.
"""
with self.scm.stash_workspace(
include_untracked=detach_rev or branch
) as workspace:
# If we are not extending an existing branch, apply current
# workspace changes to be made in new branch
if not (branch or detach_rev) and workspace:
self.stash.apply(workspace)
self._prune_lockfiles()
# checkout and detach at branch (or current HEAD)
if detach_rev:
head = detach_rev
elif branch:
head = branch
else:
head = None
with self.scm.detach_head(head) as rev:
if baseline_rev is None:
baseline_rev = rev
# update experiment params from command line
if params:
self._update_params(params)
# save additional repro command line arguments
self._pack_args(*args, **kwargs)
# save experiment as a stash commit
msg = self._stash_msg(
rev, baseline_rev=baseline_rev, branch=branch, name=name
)
stash_rev = self.stash.push(message=msg)
logger.debug(
(
"Stashed experiment '%s' with baseline '%s' "
"for future execution."
),
stash_rev[:7],
baseline_rev[:7],
)
# Reset any changes before prior workspace is unstashed
self.scm.reset(hard=True)
return stash_rev
def _prune_lockfiles(self):
# NOTE: dirty DVC lock files must be restored to index state to
# avoid checking out incorrect persist or checkpoint outs
tree = self.scm.get_tree("HEAD")
lock_files = [
str(fname)
for fname in tree.walk_files(self.scm.root_dir)
if is_lock_file(fname)
]
if lock_files:
self.scm.reset(paths=lock_files)
self.scm.checkout_paths(lock_files, force=True)
def _stash_msg(
self,
rev: str,
baseline_rev: str,
branch: Optional[str] = None,
name: Optional[str] = None,
):
if not baseline_rev:
baseline_rev = rev
msg = self.STASH_EXPERIMENT_FORMAT.format(
rev=rev, baseline_rev=baseline_rev, name=name if name else ""
)
if branch:
return f"{msg}:{branch}"
return msg
def _pack_args(self, *args, **kwargs):
BaseExecutor.pack_repro_args(self.args_file, *args, **kwargs)
self.scm.add(self.args_file)
def _update_params(self, params: dict):
"""Update experiment params files with the specified values."""
from benedict import benedict
from dvc.utils.serialize import MODIFIERS
logger.debug("Using experiment params '%s'", params)
for params_fname in params:
path = PathInfo(self.repo.root_dir) / params_fname
suffix = path.suffix.lower()
modify_data = MODIFIERS[suffix]
with modify_data(path, tree=self.repo.tree) as data:
benedict(data).merge(params[params_fname], overwrite=True)
# Force params file changes to be staged in git
# Otherwise in certain situations the changes to params file may be
# ignored when we `git stash` them since mtime is used to determine
# whether the file is dirty
self.scm.add(list(params.keys()))
def reproduce_one(self, queue=False, **kwargs):
"""Reproduce and checkout a single experiment."""
stash_rev = self.new(**kwargs)
if queue:
logger.info(
"Queued experiment '%s' for future execution.", stash_rev[:7]
)
return [stash_rev]
results = self.reproduce([stash_rev], keep_stash=False)
exp_rev = first(results)
if exp_rev is not None:
self._log_reproduced(results)
return results
def reproduce_queued(self, **kwargs):
results = self.reproduce(**kwargs)
if results:
self._log_reproduced(results)
return results
def _log_reproduced(self, revs: Iterable[str]):
names = []
for rev in revs:
name = self.get_exact_name(rev)
names.append(name if name else rev[:7])
fmt = (
"\nReproduced experiment(s): %s\n"
"To promote an experiment to a Git branch run:\n\n"
"\tdvc exp branch <exp>\n\n"
"To apply the results of an experiment to your workspace run:\n\n"
"\tdvc exp apply <exp>"
)
logger.info(fmt, ", ".join(names))
@scm_locked
def new(
self, *args, checkpoint_resume: Optional[str] = None, **kwargs,
):
"""Create a new experiment.
Experiment will be reproduced and checked out into the user's
workspace.
"""
if checkpoint_resume is not None:
return self._resume_checkpoint(
*args, checkpoint_resume=checkpoint_resume, **kwargs
)
return self._stash_exp(*args, **kwargs)
def _resume_checkpoint(
self, *args, checkpoint_resume: Optional[str] = None, **kwargs,
):
"""Resume an existing (checkpoint) experiment.
Experiment will be reproduced and checked out into the user's
workspace.
"""
assert checkpoint_resume
if checkpoint_resume == self.LAST_CHECKPOINT:
# Continue from most recently committed checkpoint
resume_rev = self._get_last_checkpoint()
else:
resume_rev = self.scm.resolve_rev(checkpoint_resume)
allow_multiple = "params" in kwargs
branch = self.get_branch_by_rev(
resume_rev, allow_multiple=allow_multiple
)
if not branch:
raise DvcException(
"Could not find checkpoint experiment "
f"'{checkpoint_resume}'"
)
baseline_rev = self._get_baseline(branch)
if kwargs.get("params", None):
logger.debug(
"Branching from checkpoint '%s' with modified params, "
"baseline '%s'",
checkpoint_resume,
baseline_rev[:7],
)
detach_rev = resume_rev
branch = None
else:
logger.debug(
"Continuing from tip of checkpoint '%s'", checkpoint_resume
)
detach_rev = None
return self._stash_exp(
*args,
detach_rev=detach_rev,
baseline_rev=baseline_rev,
branch=branch,
**kwargs,
)
def _get_last_checkpoint(self):
rev = self.scm.get_ref(EXEC_CHECKPOINT)
if rev:
return rev
raise DvcException("No existing checkpoint experiment to continue")
@scm_locked
def reproduce(
self,
revs: Optional[Iterable] = None,
keep_stash: Optional[bool] = True,
**kwargs,
):
"""Reproduce the specified experiments.
Args:
revs: If revs is not specified, all stashed experiments will be
reproduced.
keep_stash: If True, stashed experiments will be preserved if they
fail to reproduce successfully.
"""
stash_revs = self.stash_revs
# to_run contains mapping of:
# input_rev: (stash_index, rev, baseline_rev)
# where input_rev contains the changes to execute (usually a stash
# commit), rev is the original SCM commit to be checked out, and
# baseline_rev is the experiment baseline.
if revs is None:
to_run = dict(stash_revs)
else:
to_run = {
rev: stash_revs[rev]
if rev in stash_revs
else self.StashEntry(None, rev, rev, None, None)
for rev in revs
}
logger.debug(
"Reproducing experiment revs '%s'",
", ".join((rev[:7] for rev in to_run)),
)
executors = self._init_executors(to_run)
exec_results = self._reproduce(executors, **kwargs)
if keep_stash:
# only drop successfully run stashed experiments
to_drop = sorted(
(
stash_revs[rev][0]
for rev in exec_results
if rev in stash_revs
),
reverse=True,
)
else:
# drop all stashed experiments
to_drop = sorted(
(stash_revs[rev][0] for rev in to_run if rev in stash_revs),
reverse=True,
)
for index in to_drop:
self.stash.drop(index)
result = {}
for _, exp_result in exec_results.items():
result.update(exp_result)
return result
def _init_executors(self, to_run):
executors = {}
for stash_rev, item in to_run.items():
self.scm.set_ref(EXEC_HEAD, item.rev)
self.scm.set_ref(EXEC_MERGE, stash_rev)
self.scm.set_ref(EXEC_BASELINE, item.baseline_rev)
# Executor will be initialized with an empty git repo that
# we populate by pushing:
# EXEC_HEAD - the base commit for this experiment
# EXEC_MERGE - the unmerged changes (from our stash)
# to be reproduced
# EXEC_BASELINE - the baseline commit for this experiment
executor = LocalExecutor(
self.scm,
self.dvc_dir,
name=item.name,
branch=item.branch,
cache_dir=self.repo.cache.local.cache_dir,
)
executors[item.rev] = executor
for ref in (EXEC_HEAD, EXEC_MERGE, EXEC_BASELINE):
self.scm.remove_ref(ref)
return executors
def _reproduce(
self, executors: dict, jobs: Optional[int] = 1
) -> Mapping[str, Mapping[str, str]]:
"""Run dvc repro for the specified BaseExecutors in parallel.
Returns dict containing successfully executed experiments.
"""
result = defaultdict(dict)
manager = Manager()
pid_q = manager.Queue()
with ProcessPoolExecutor(max_workers=jobs) as workers:
futures = {}
for rev, executor in executors.items():
future = workers.submit(
executor.reproduce,
executor.dvc_dir,
pid_q,
rev,
name=executor.name,
)
futures[future] = (rev, executor)
try:
wait(futures)
except KeyboardInterrupt:
# forward SIGINT to any running executor processes and
# cancel any remaining futures
pids = {}
while not pid_q.empty():
rev, pid = pid_q.get()
pids[rev] = pid
for future, (rev, _) in futures.items():
if future.running():
os.kill(pids[rev], signal.SIGINT)
elif not future.done():
future.cancel()
for future, (rev, executor) in futures.items():
rev, executor = futures[future]
exc = future.exception()
try:
if exc is None:
exp_hash, force = future.result()
result[rev].update(
self._collect_executor(executor, exp_hash, force)
)
else:
# Checkpoint errors have already been logged
if not isinstance(exc, CheckpointKilledError):
logger.exception(
"Failed to reproduce experiment '%s'",
rev[:7],
exc_info=exc,
)
except CancelledError:
logger.error(
"Cancelled before attempting to reproduce experiment "
"'%s'",
rev[:7],
)
finally:
executor.cleanup()
return result
def _collect_executor(
self, executor, exp_hash, force
) -> Mapping[str, str]:
# NOTE: GitPython Repo instances cannot be re-used
# after process has received SIGINT or SIGTERM, so we
# need this hack to re-instantiate git instances after
# checkpoint runs. See:
# https://github.com/gitpython-developers/GitPython/issues/427
del self.repo.scm
results = {}
def on_diverged(ref: str, checkpoint: bool):
ref_info = ExpRefInfo.from_ref(ref)
if checkpoint:
raise CheckpointExistsError(ref_info.name)
raise ExperimentExistsError(ref_info.name)
for ref in executor.fetch_exps(
self.scm, force=force, on_diverged=on_diverged,
):
exp_rev = self.scm.get_ref(ref)
if exp_rev:
logger.debug("Collected experiment '%s'.", exp_rev[:7])
results[exp_rev] = exp_hash
return results
def check_baseline(self, exp_rev):
baseline_sha = self.repo.scm.get_rev()
if exp_rev == baseline_sha:
return exp_rev
exp_baseline = self._get_baseline(exp_rev)
if exp_baseline is None:
# if we can't tell from branch name, fall back to parent commit
exp_commit = self.scm.resolve_commit(exp_rev)
if exp_commit:
exp_baseline = first(exp_commit.parents).hexsha
if exp_baseline == baseline_sha:
return exp_baseline
raise BaselineMismatchError(exp_baseline, baseline_sha)
@scm_locked
def get_baseline(self, rev):
"""Return the baseline rev for an experiment rev."""
return self._get_baseline(rev)
def _get_baseline(self, rev):
rev = self.scm.resolve_rev(rev)
if rev in self.stash_revs:
entry = self.stash_revs.get(rev)
if entry:
return entry.baseline_rev
return None
ref_info = first(exp_refs_by_rev(self.scm, rev))
if ref_info:
return ref_info.baseline_sha
return None
def get_branch_by_rev(self, rev: str, allow_multiple: bool = False) -> str:
"""Returns full refname for the experiment branch containing rev."""
ref_infos = list(exp_refs_by_rev(self.scm, rev))
if not ref_infos:
return None
if len(ref_infos) > 1 and not allow_multiple:
raise MultipleBranchError(rev)
return str(ref_infos[0])
def get_exact_name(self, rev: str):
"""Returns preferred name for the specified revision.
        Prefers tags, branches (heads), experiments in that order.
"""
exclude = f"{EXEC_NAMESPACE}/*"
ref = self.scm.describe(rev, base=EXPS_NAMESPACE, exclude=exclude)
if ref:
return ExpRefInfo.from_ref(ref).name
return None
def apply(self, *args, **kwargs):
from dvc.repo.experiments.apply import apply
return apply(self.repo, *args, **kwargs)
def branch(self, *args, **kwargs):
from dvc.repo.experiments.branch import branch
return branch(self.repo, *args, **kwargs)
def diff(self, *args, **kwargs):
from dvc.repo.experiments.diff import diff
return diff(self.repo, *args, **kwargs)
def show(self, *args, **kwargs):
from dvc.repo.experiments.show import show
return show(self.repo, *args, **kwargs)
def run(self, *args, **kwargs):
from dvc.repo.experiments.run import run
return run(self.repo, *args, **kwargs)
def gc(self, *args, **kwargs):
from dvc.repo.experiments.gc import gc
return gc(self.repo, *args, **kwargs)
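# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). One plausible
# way to drive the Experiments class from a script: queue an experiment with
# overridden params, then reproduce everything queued. The params file name
# and values are hypothetical; a real DVC repo with Git SCM is assumed.
if __name__ == "__main__":
    from dvc.repo import Repo
    repo = Repo()  # locate the enclosing DVC repository
    exps = Experiments(repo)
    exps.reproduce_one(queue=True, params={"params.yaml": {"lr": 0.01}})
    print(exps.reproduce_queued(jobs=1))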
|
create_model
|
Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
# MASKED: create_model function (lines 33-68)
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
|
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
| 33 | 68 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
|
create_model
|
Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
# MASKED: create_model function (lines 74-122)
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
|
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
| 74 | 122 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
|
create_model
|
Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
# MASKED: create_model function (lines 155-208)
|
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
| 155 | 208 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
class CNNModel(models.BaseModel):
"""CNN model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a CNN model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class ResNetModel(models.BaseModel):
"""ResNet model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a ResNet model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
model_input = tf.reshape(model_input, [-1, 32, 32])
model_input = tf.expand_dims(model_input, 3)
net = slim.conv2d(model_input, 3, [
3, 3], scope='conv1', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
4, 4], scope='conv2', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(net, 3, [
5, 5], scope='conv3', activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = tf.tile(model_input, [1, 1, 1, 3])
# ResNet blocks
for i in range(0, 9):
temp = net + shortcut
net = slim.conv2d(temp, 3, [
3, 3], scope='conv%d_1' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
4, 4], scope='conv%d_2' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
net = slim.conv2d(temp, 3, [
5, 5], scope='conv%d_3' % (i+1), activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(l2_penalty))
shortcut = temp
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = tf.reshape(net, [-1, 16 * 16 * 3])
output = slim.fully_connected(
net,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self,
model_input,
vocab_size,
l2_penalty=1e-8,
**unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
output = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(
tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(
tf.reshape(expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
|
get_corner_loss_lidar
|
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
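# Illustrative sketch (torch only, values made up; not part of the loss classes
# in this file): how the focal weight above rescales the per-element BCE. A
# confident-correct logit is almost switched off while a confident-wrong logit
# keeps most of its weight, so training focuses on hard examples.
def _focal_weight_sketch(gamma=2.0, alpha=0.25):
    logits = torch.tensor([4.0, -4.0, 0.0])   # confident-correct, confident-wrong, uncertain
    target = torch.ones(3)                    # all three anchors are positives here
    p = torch.sigmoid(logits)
    pt = target * (1.0 - p) + (1.0 - target) * p              # probability mass on the wrong side
    alpha_w = target * alpha + (1.0 - target) * (1.0 - alpha)
    return alpha_w * pt ** gamma
# _focal_weight_sketch() is roughly [8.1e-05, 0.241, 0.0625].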
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
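# Illustrative sketch (torch only, residuals made up; not part of the loss
# classes): the piecewise rule in WeightedSmoothL1Loss.smooth_l1_loss above with
# the default beta = 1/9. Residuals inside the beta window take the quadratic
# (L2-like) branch, residuals outside take the linear (L1-like) branch.
def _smooth_l1_sketch(beta=1.0 / 9.0):
    diff = torch.tensor([0.01, 0.05, 0.5, 2.0])
    n = diff.abs()
    return torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
# _smooth_l1_sketch() is approximately [0.00045, 0.01125, 0.44444, 1.94444].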
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
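# Illustrative sketch (torch only, shapes made up; not part of the loss
# classes): what the permute/argmax in WeightedCrossEntropyLoss.forward above
# achieves. F.cross_entropy expects (B, #classes, #anchors) logits and integer
# class targets, while this codebase carries (B, #anchors, #classes) logits and
# one-hot targets.
def _weighted_ce_sketch(B=2, A=4, C=3):
    logits = torch.randn(B, A, C)
    one_hot = F.one_hot(torch.randint(0, C, (B, A)), num_classes=C).float()
    weights = torch.ones(B, A)
    loss = F.cross_entropy(logits.permute(0, 2, 1), one_hot.argmax(dim=-1),
                           reduction='none') * weights
    assert loss.shape == (B, A)
    return loss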
# MASKED: get_corner_loss_lidar function (lines 212-235)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
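# Illustrative sketch (torch only, heatmaps made up; not part of the loss
# classes): the CornerNet-style penalty above on a tiny 1x1x2x2 heatmap. The
# cell whose ground truth is 1 contributes the positive term; every other cell
# contributes a negative term that the (1 - gt)^4 factor shrinks near the
# Gaussian peak.
def _centernet_focal_sketch():
    pred = torch.tensor([[[[0.90, 0.20], [0.10, 0.05]]]])   # already sigmoid-ed scores
    gt = torch.tensor([[[[1.00, 0.60], [0.30, 0.00]]]])     # Gaussian-splatted targets
    return CenterNetFocalLoss()(pred, gt)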
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
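# Illustrative sketch (torch only, sizes made up; not part of the loss classes):
# how _transpose_and_gather_feat above picks per-object vectors out of a dense
# (B, C, H, W) head output, given flattened spatial indices (y * W + x).
def _gather_feat_sketch(B=2, C=3, H=4, W=4, max_objects=2):
    output = torch.arange(B * C * H * W, dtype=torch.float32).view(B, C, H, W)
    ind = torch.tensor([[0, 5], [10, 15]])   # max_objects flattened positions per batch element
    gathered = _transpose_and_gather_feat(output, ind)
    assert gathered.shape == (B, max_objects, C)
    return gathered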
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all entries and normalized by the number of positives.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced to a sum or a mean according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
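# Illustrative sketch (torch only, random stand-ins for the (N, 8, 3) corner
# tensors that box_utils.boxes_to_corners_3d would produce; not part of the
# snippet above): the reduction used above, i.e. per-corner L2 distance, an
# elementwise min against the heading-flipped ground truth, smooth-L1 with
# beta = 1.0, then a mean over the 8 corners of each box.
import torch
def _corner_loss_reduction_sketch(N=5, beta=1.0, seed=0):
    torch.manual_seed(seed)
    pred_corners = torch.randn(N, 8, 3)
    gt_corners = torch.randn(N, 8, 3)
    gt_corners_flip = torch.randn(N, 8, 3)   # stands in for the flipped-heading corners
    corner_dist = torch.min(torch.norm(pred_corners - gt_corners, dim=2),
                            torch.norm(pred_corners - gt_corners_flip, dim=2))   # (N, 8)
    loss = torch.where(corner_dist < beta,
                       0.5 * corner_dist ** 2 / beta,
                       corner_dist - 0.5 * beta)                                 # smooth L1
    return loss.mean(dim=1)                                                      # (N,)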
| 212 | 235 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all entries and normalized by the number of positives.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced to a sum or a mean according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
__init__
|
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
# MASKED: __init__ function (lines 15-23)
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all elements and divided by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
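# The explicit max(x, 0) - x * z + log(1 + exp(-|x|)) formulation used by the
# focal losses in this file should agree with PyTorch's numerically stable
# binary cross entropy with logits; a quick sanity check under that assumption:
def _check_sigmoid_ce_formula():
    logits = torch.randn(4, 6, 3)
    targets = torch.randint(0, 2, (4, 6, 3)).float()
    manual = ForegroundFocalLoss.sigmoid_cross_entropy_with_logits(logits, targets)
    builtin = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return torch.allclose(manual, builtin, atol=1e-6)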
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced with sum() or mean() according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
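# SmoothL1Loss above is the sigma-parameterized form of the smooth L1 kernel:
# with beta = 1 / sigma ** 2 its element-wise values should match
# WeightedSmoothL1Loss.smooth_l1_loss. A quick check under that assumption:
def _check_sigma_beta_equivalence(sigma: float = 3.0):
    x = torch.linspace(-2.0, 2.0, steps=101)
    sigma_form = SmoothL1Loss(sigma=sigma).smooth_l1_loss(x)
    beta_form = WeightedSmoothL1Loss.smooth_l1_loss(x, beta=1.0 / sigma ** 2)
    return torch.allclose(sigma_form, beta_form, atol=1e-6)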
class CrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
| 15 | 23 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all elements and divided by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced with sum() or mean() according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
forward
|
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
# MASKED: forward function (lines 45-73)
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all elements and divided by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced with sum() or mean() according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
| 45 | 73 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss summed over all elements and divided by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
        Returns:
            loss: scalar float tensor.
                Focal loss reduced with sum() or mean() according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform the input to fit the format of the PyTorch official cross entropy loss.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
__init__
|
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
# MASKED: __init__ function (lines 86-99)
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss summed over all elements and normalized
by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss reduced over all elements by mean or sum,
according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
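# A minimal usage sketch for the constructor above (illustrative only). It assumes
# a CUDA device is available, since the constructor moves code_weights onto the GPU,
# and uses hypothetical shapes: batch of 2, 100 anchors, 7 box codes
# (x, y, z, dx, dy, dz, heading).
import torch
loss_fn = WeightedSmoothL1Loss(beta=1.0 / 9.0, code_weights=[1.0] * 7)
preds = torch.randn(2, 100, 7).cuda()       # hypothetical encoded box predictions
targets = torch.randn(2, 100, 7).cuda()     # hypothetical regression targets
anchor_weights = torch.ones(2, 100).cuda()  # anchor-wise weights
loss = loss_fn(preds, targets, weights=anchor_weights)  # (2, 100, 7), unreduced
total = loss.sum() / torch.clamp(anchor_weights.sum(), min=1.0)  # one common scalar reduction
# Note that code_weights should be supplied here: with code_weights=None, forward
# would reference self.code_weights, which this __init__ never sets.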
| 86 | 99 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss summed over all elements and normalized
by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss reduced over all elements by mean or sum,
according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
forward
|
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
# MASKED: forward function (lines 111-138)
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss summed over all elements and normalized
by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: Scalar float tensor.
Focal loss reduced over all elements by mean or sum,
according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Ecoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
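# A small worked example of the piecewise smooth L1 used by the forward pass above,
# assuming the default beta = 1/9; the numbers are illustrative only.
import torch
beta = 1.0 / 9.0
diff = torch.tensor([0.05, 0.5])
# |0.05| < beta  -> quadratic branch: 0.5 * 0.05**2 / beta = 0.01125
# |0.5|  >= beta -> linear branch:    0.5 - 0.5 * beta     = 0.44444...
out = WeightedSmoothL1Loss.smooth_l1_loss(diff, beta)
# out is approximately tensor([0.0113, 0.4444]).
# NaN regression targets contribute zero here, because forward replaces them with
# the prediction before taking the difference.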
| 111 | 138 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
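# _neg_loss expects `pred` to already be a probability heatmap in (0, 1) (a sigmoid
# output, typically clamped away from 0 and 1) and `gt` a Gaussian-splatted target in
# which only exact peaks equal 1. A hypothetical call with toy shapes:
#
#   focal = CenterNetFocalLoss()
#   pred = torch.rand(2, 3, 8, 8).clamp(1e-4, 1 - 1e-4)   # (batch, c, h, w)
#   gt = torch.zeros(2, 3, 8, 8)
#   gt[0, 0, 4, 4] = 1.0                                  # one positive peak
#   print(focal(pred, gt))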
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
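# _transpose_and_gather_feat flattens a (batch, dim, h, w) head output to
# (batch, h*w, dim) and gathers the rows at the flattened spatial indices `ind`,
# i.e. the regression features at each object's center cell. Hypothetical shapes:
#
#   feat = torch.randn(2, 8, 16, 16)            # (batch, dim, h, w)
#   ind = torch.randint(0, 16 * 16, (2, 5))     # (batch, max_objects)
#   assert _transpose_and_gather_feat(feat, ind).shape == (2, 5, 8)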
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
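# The loss is evaluated only at valid object slots (mask) and at non-NaN target
# entries, then normalised by the number of valid objects, so padded rows of the
# (batch, max_objects, dim) target tensor contribute nothing.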
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
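# sigmoid_cross_entropy_with_logits is the numerically stable binary cross entropy on
# logits, so it should agree with PyTorch's built-in. A hypothetical check:
#
#   x = torch.randn(4, 3)
#   z = torch.rand(4, 3)
#   manual = ForegroundFocalLoss.sigmoid_cross_entropy_with_logits(x, z)
#   builtin = F.binary_cross_entropy_with_logits(x, z, reduction='none')
#   assert torch.allclose(manual, builtin, atol=1e-6)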
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
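# This sigma-parameterised Smooth L1 traces the same curve as
# WeightedSmoothL1Loss.smooth_l1_loss with beta = 1 / sigma ** 2 (the L1/L2 switch
# point). A hypothetical consistency check:
#
#   x = torch.randn(10, 7)
#   a = SmoothL1Loss(sigma=3.0).smooth_l1_loss(x)
#   b = WeightedSmoothL1Loss.smooth_l1_loss(x, beta=1.0 / 9.0)
#   assert torch.allclose(a, b)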
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
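# IOU3DLoss depends on the differentiable CUDA op boxes_iou3d_gpu_differentiable, so it
# only runs with the compiled pcdet extension on a GPU; the input.size(0) > 0 branch
# keeps the computation graph valid when no boxes are matched by returning a zero that
# still depends on the inputs.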
|
__init__
|
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
# MASKED: __init__ function (lines 142-151)
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
| 142 | 151 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
forward
|
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted L1 loss without reduction.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors, #codes) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
# MASKED: forward function (lines 153-180)
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
| 153 | 180 |
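A minimal usage sketch of the weighted L1 computation shown in the implementation above; the shapes, names, and values below are illustrative assumptions rather than part of the original file.

import torch

# Assumed sizes: B batches, A anchors, C box-code values per anchor.
B, A, C = 2, 4, 7
preds = torch.randn(B, A, C)         # encoded box predictions
targets = torch.randn(B, A, C)       # regression targets (NaN entries would be replaced by preds)
weights = torch.rand(B, A)           # anchor-wise weights, e.g. a positive-anchor mask

loss = torch.abs(preds - targets)    # element-wise L1, shape (B, A, C)
loss = loss * weights.unsqueeze(-1)  # broadcast the anchor weights over the code dimension
print(loss.shape)                    # torch.Size([2, 4, 7]); any reduction is left to the caller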
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor, the summed focal loss normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor after the configured reduction ('sum' or 'mean').
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
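A side note on the sigmoid_cross_entropy_with_logits helper that recurs in the file above: the clamp/log1p expression is the standard numerically stable form of binary cross entropy on logits, so it can be sanity-checked against torch.nn.functional.binary_cross_entropy_with_logits. The tensors below are arbitrary and only illustrate the identity.

import torch
import torch.nn.functional as F

logits = torch.randn(3, 5) * 10                # large magnitudes stress the stable form
targets = torch.randint(0, 2, (3, 5)).float()  # 0/1 targets

manual = torch.clamp(logits, min=0) - logits * targets + torch.log1p(torch.exp(-torch.abs(logits)))
reference = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
print(torch.allclose(manual, reference, atol=1e-6))  # expected: True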
forward
|
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
# MASKED: forward function (lines 192-209)
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor, the summed focal loss normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor after the configured reduction ('sum' or 'mean').
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
| 192 | 209 |
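A minimal sketch of how the WeightedCrossEntropyLoss.forward implementation above is typically called; the sizes and dummy tensors are assumptions for illustration only.

import torch
import torch.nn.functional as F

B, A, C = 2, 6, 3                                    # assumed batch, anchors, classes
logits = torch.randn(B, A, C)
one_hot = F.one_hot(torch.randint(0, C, (B, A)), num_classes=C).float()
weights = torch.ones(B, A)                           # anchor-wise weights

# F.cross_entropy expects the class dimension second, hence the permute in the forward above
loss = F.cross_entropy(logits.permute(0, 2, 1), one_hot.argmax(dim=-1), reduction='none') * weights
print(loss.shape)                                    # torch.Size([2, 6]): one value per anchor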
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the fomation of PyTorch offical cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor, the summed focal loss normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor after the configured reduction ('sum' or 'mean').
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
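The CenterNet-style losses in the file above pick per-object predictions out of a dense (B, C, H, W) head output via _gather_feat / _transpose_and_gather_feat; below is a small stand-alone sketch of that gathering step with assumed shapes.

import torch

B, C, H, W = 2, 4, 8, 8
feat = torch.randn(B, C, H, W)                        # dense head output
ind = torch.randint(0, H * W, (B, 5))                 # 5 flattened object positions per sample

flat = feat.permute(0, 2, 3, 1).reshape(B, H * W, C)  # (B, H*W, C), as in _transpose_and_gather_feat
gathered = flat.gather(1, ind.unsqueeze(2).expand(-1, -1, C))
print(gathered.shape)                                 # torch.Size([2, 5, 4])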
_neg_loss
|
Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
|
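A hedged sketch of the penalty-reduced (CornerNet/CenterNet) focal loss that _neg_loss implements: heatmap cells equal to 1 are positives, every other cell is a negative whose penalty is scaled by (1 - gt)^4. The heatmaps below are made up for illustration.

import torch

pred = torch.rand(1, 3, 4, 4).clamp(1e-4, 1 - 1e-4)  # predicted heatmap in (0, 1)
gt = torch.zeros(1, 3, 4, 4)
gt[0, 0, 1, 1] = 1.0                                  # a single positive center

pos, neg = gt.eq(1).float(), gt.lt(1).float()
pos_loss = (torch.log(pred) * (1 - pred) ** 2 * pos).sum()
neg_loss = (torch.log(1 - pred) * pred ** 2 * (1 - gt) ** 4 * neg).sum()
num_pos = pos.sum()
loss = -neg_loss if num_pos == 0 else -(pos_loss + neg_loss) / num_pos
print(float(loss))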
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of PyTorch's official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
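# --- Illustrative sketch (not part of the original file) ---------------------
# get_corner_loss_lidar also compares the prediction against the ground-truth
# box rotated by pi and keeps the smaller corner distance, so a box whose
# heading is off by exactly pi (same physical extent) is not heavily penalised.
# Hypothetical helper; boxes are assumed to use the (x, y, z, dx, dy, dz,
# heading) layout used elsewhere in this file.
def _example_corner_loss_heading_flip():
    gt = torch.tensor([[0., 0., 0., 4., 2., 1.5, 0.]])
    pred = gt.clone()
    pred[:, 6] += np.pi  # identical box, heading off by pi
    return get_corner_loss_lidar(pred, gt)  # ~0 thanks to the flipped branch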
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
# MASKED: _neg_loss function (lines 244-270)
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
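# --- Illustrative sketch (not part of the original file) ---------------------
# _transpose_and_gather_feat turns a dense head output of shape (B, C, H, W)
# into per-object features of shape (B, K, C), where ind holds the K flattened
# spatial indices (row * W + col) of the object centres. Sizes and the helper
# name are made up for illustration only.
def _example_gather_center_features():
    batch, channels, height, width, max_objects = 2, 8, 4, 6, 3
    feat = torch.randn(batch, channels, height, width)
    ind = torch.randint(height * width, (batch, max_objects))
    gathered = _transpose_and_gather_feat(feat, ind)
    assert gathered.shape == (batch, max_objects, channels)
    return gathered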
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
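# --- Illustrative sketch (not part of the original file) ---------------------
# The hand-written max(x, 0) - x * z + log(1 + exp(-|x|)) formula above is the
# numerically stable form of binary cross entropy on logits, so it agrees with
# PyTorch's built-in F.binary_cross_entropy_with_logits. Hypothetical helper,
# defined for illustration only.
def _example_stable_bce_matches_builtin():
    logits = torch.randn(2, 5, 3)
    targets = torch.randint(0, 2, (2, 5, 3)).float()
    ours = ForegroundFocalLoss.sigmoid_cross_entropy_with_logits(logits, targets)
    ref = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    assert torch.allclose(ours, ref, atol=1e-5)
    return ours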
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
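# --- Illustrative sketch (not part of the original file) ---------------------
# SmoothL1Loss parameterises the L1/L2 transition point as 1 / sigma ** 2, so
# sigma = 3 corresponds to beta = 1/9, the default beta of WeightedSmoothL1Loss
# above; the element-wise values of the two formulations coincide.
# Hypothetical helper, defined for illustration only.
def _example_sigma_beta_equivalence():
    x = torch.linspace(-2.0, 2.0, steps=41)
    sigma = 3.0
    per_sigma = SmoothL1Loss(sigma=sigma).smooth_l1_loss(x)
    per_beta = WeightedSmoothL1Loss.smooth_l1_loss(x, beta=1.0 / sigma ** 2)
    assert torch.allclose(per_sigma, per_beta)
    return per_sigma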
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss.
    Optionally converts one-hot targets to class indices.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
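# --- Illustrative sketch (not part of the original file) ---------------------
# IOU3DLoss sums (1 - IoU3D) over all box pairs; with an empty prediction it
# falls back to a zero-valued expression that keeps the autograd graph intact.
# The non-empty path calls boxes_iou3d_gpu_differentiable and therefore needs
# CUDA; this hypothetical helper only exercises the CPU-safe empty fallback.
def _example_iou3d_empty_fallback():
    empty_pred = torch.zeros((0, 7), requires_grad=True)
    empty_gt = torch.zeros((0, 7))
    loss = IOU3DLoss()(empty_pred, empty_gt)
    return loss  # shape (0,): contributes nothing but stays differentiable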
|
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
| 244 | 270 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss.
    Optionally converts one-hot targets to class indices.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
_reg_loss
|
L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
# MASKED: _reg_loss function (lines 306-327)
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss.
    Optionally converts one-hot targets to class indices.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
| 306 | 327 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
        self.code_weights = None  # defined up front so forward() can test it even when unused
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
__init__
|
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
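# --- Illustrative usage sketch (not part of the original module; names prefixed
# with _demo_ are hypothetical helpers added for clarity) ---
# Shows the tensor layout SigmoidFocalClassificationLoss expects and checks that
# the hand-rolled sigmoid_cross_entropy_with_logits matches PyTorch's built-in
# BCE-with-logits, which implements the same max(x, 0) - x*z + log1p(exp(-|x|)) form.
def _demo_sigmoid_focal_classification_loss():
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 8, 3)                                    # (B, #anchors, #classes)
    targets = F.one_hot(torch.randint(0, 3, (2, 8)), num_classes=3).float()
    weights = torch.ones(2, 8)                                       # anchor-wise weights
    manual = loss_fn.sigmoid_cross_entropy_with_logits(logits, targets)
    builtin = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    assert torch.allclose(manual, builtin, atol=1e-5)
    loss = loss_fn(logits, targets, weights)                         # (B, #anchors, #classes), unreduced
    return loss.sum() / torch.clamp(weights.sum(), min=1.0)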
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
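# --- Illustrative sketch (not part of the original module) ---
# Checks the piecewise definition quoted in the class docstring:
# 0.5 * x**2 / beta when |x| < beta, otherwise |x| - 0.5 * beta.
# Note that forward() assumes code_weights was supplied at construction, since
# self.code_weights is only assigned inside that branch of __init__.
def _demo_weighted_smooth_l1():
    diff = torch.tensor([0.05, 0.5])
    beta = 1.0 / 9.0
    loss = WeightedSmoothL1Loss.smooth_l1_loss(diff, beta)
    expected = torch.where(diff.abs() < beta, 0.5 * diff ** 2 / beta, diff.abs() - 0.5 * beta)
    assert torch.allclose(loss, expected)
    return loss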
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
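# --- Illustrative sketch (not part of the original module) ---
# WeightedCrossEntropyLoss permutes logits to (B, #classes, #anchors) because
# F.cross_entropy expects the class dimension second, and converts one-hot
# targets to class indices via argmax before applying anchor-wise weights.
def _demo_weighted_cross_entropy():
    loss_fn = WeightedCrossEntropyLoss()
    logits = torch.randn(2, 8, 3)                                    # (B, #anchors, #classes)
    targets = F.one_hot(torch.randint(0, 3, (2, 8)), num_classes=3).float()
    weights = torch.ones(2, 8)
    return loss_fn(logits, targets, weights)                         # (B, #anchors)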
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
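# --- Illustrative sketch (not part of the original module); assumes
# box_utils.boxes_to_corners_3d follows the (N, 7) [x, y, z, dx, dy, dz, heading]
# box convention used elsewhere in pcdet ---
# The flipped ground-truth copy makes the corner distance insensitive to a
# 180-degree heading ambiguity.
def _demo_corner_loss_lidar():
    pred = torch.tensor([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.1]])
    gt = torch.tensor([[0.2, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]])
    return get_corner_loss_lidar(pred, gt)                           # (N,) per-box corner loss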
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
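# --- Illustrative sketch (not part of the original module) ---
# CenterNetFocalLoss expects pred to already be a probability heatmap strictly
# inside (0, 1) (e.g. clamped sigmoid output) and gt to be a Gaussian-splatted
# heatmap whose peaks equal exactly 1 at object centers.
def _demo_centernet_focal_loss():
    loss_fn = CenterNetFocalLoss()
    pred = torch.full((1, 2, 4, 4), 0.1)                             # (B, C, H, W) probabilities
    gt = torch.zeros(1, 2, 4, 4)
    gt[0, 0, 1, 1] = 1.0                                             # one positive center
    return loss_fn(pred, gt)                                         # scalar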
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
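# --- Illustrative sketch (not part of the original module) ---
# _transpose_and_gather_feat flattens a (B, C, H, W) map to (B, H*W, C) and picks
# the rows addressed by ind, i.e. flattened y * W + x positions of object centers.
def _demo_transpose_and_gather_feat():
    feat = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2, 3, 4, 5)  # (B, C, H, W)
    ind = torch.tensor([[0, 7], [3, 19]])                                     # (B, max_objects)
    out = _transpose_and_gather_feat(feat, ind)                               # (B, max_objects, C)
    assert out.shape == (2, 2, 3)
    return out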
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
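# --- Illustrative sketch (not part of the original module) ---
# CenterNetRegLoss gathers per-object regression vectors from the dense output
# map at the indices in ind, zeroes out padded slots via mask, and normalizes
# by the number of real objects.
def _demo_centernet_reg_loss():
    loss_fn = CenterNetRegLoss()
    output = torch.randn(1, 2, 4, 4)                                 # (B, dim, H, W)
    ind = torch.tensor([[5, 0]])                                     # flattened center positions
    mask = torch.tensor([[1, 0]])                                    # only the first slot is a real object
    target = torch.zeros(1, 2, 2)                                    # (B, max_objects, dim)
    return loss_fn(output, mask, ind, target)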
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
# MASKED: __init__ function (lines 340-348)
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
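# --- Illustrative sketch (not part of the original module) ---
# CenterNetSmoothRegLoss takes the same (output, mask, ind, target) layout as
# CenterNetRegLoss plus a sin_loss flag that must be False in this implementation.
def _demo_centernet_smooth_reg_loss():
    loss_fn = CenterNetSmoothRegLoss()
    output = torch.randn(1, 2, 4, 4)
    ind = torch.tensor([[5, 0]])
    mask = torch.tensor([[1, 0]])
    target = torch.zeros(1, 2, 2)
    return loss_fn(output, mask, ind, target, sin_loss=False)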
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
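# --- Illustrative sketch (not part of the original module) ---
# SmoothL1Loss here operates on a precomputed residual tensor of shape
# (box_num, code_size): 'sum' averages over the code dimension and sums over
# boxes, while the default branch sums over the box dimension per code.
def _demo_smooth_l1_loss():
    loss_fn = SmoothL1Loss(sigma=3.0, reduction='sum')
    residual = torch.randn(5, 7)                                     # (box_num, code_size)
    return loss_fn(residual)                                         # scalar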
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
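# --- Illustrative sketch (not part of the original module); assumes a CUDA
# device and that the pcdet iou3d_nms_diff extension backing
# boxes_iou3d_gpu_differentiable is built. Boxes follow the (N, 7)
# [x, y, z, dx, dy, dz, heading] convention. ---
def _demo_iou3d_loss():
    loss_fn = IOU3DLoss()
    pred = torch.tensor([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]], device='cuda')
    gt = torch.tensor([[0.5, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]], device='cuda')
    return loss_fn(pred, gt)                                         # sum of (1 - IoU3D) over boxes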
|
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
| 340 | 348 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
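# --- Illustrative sketch (not part of the original module) ---
# ForegroundFocalLoss normalizes by the number of positive targets, so this
# sketch makes sure at least one entry of the one-hot target is positive
# (num_pos == 0 would otherwise divide by zero).
def _demo_foreground_focal_loss():
    loss_fn = ForegroundFocalLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 8, 3)                                    # (B, #anchors, #classes)
    targets = torch.zeros(2, 8, 3)
    targets[0, 0, 1] = 1.0                                           # one foreground anchor
    return loss_fn(logits, targets)                                  # scalar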
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
forward
|
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
# MASKED: forward function (lines 370-395)
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
| 370 | 395 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
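# Illustrative sketch (shapes are assumed for demonstration): WeightedCrossEntropyLoss permutes
# the logits to (B, #classes, #anchors) and argmaxes the one-hot target, as the docstring says.
def _demo_weighted_cross_entropy():
    import torch
    loss_fn = WeightedCrossEntropyLoss()
    logits = torch.randn(2, 5, 3)                                # (B, #anchors, #classes)
    target = torch.nn.functional.one_hot(
        torch.randint(0, 3, (2, 5)), num_classes=3).float()     # one-hot targets
    weights = torch.ones(2, 5)                                   # anchor-wise weights
    loss = loss_fn(logits, target, weights)                      # (B, #anchors)
    assert loss.shape == (2, 5)
    return loss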
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
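# Illustrative sketch (shapes are assumed for demonstration): _transpose_and_gather_feat pulls
# per-object feature vectors out of a (B, C, H, W) map at flattened spatial indices (h * W + w),
# which is how the CenterNet-style losses below align predictions with their targets.
def _demo_transpose_and_gather_feat():
    import torch
    B, C, H, W = 2, 4, 8, 8
    feat_map = torch.randn(B, C, H, W)
    ind = torch.tensor([[0, 10, 63], [5, 5, 12]])          # (B, max_objects)
    gathered = _transpose_and_gather_feat(feat_map, ind)   # (B, max_objects, C)
    assert gathered.shape == (B, 3, C)
    # flattened index 10 in an 8x8 map corresponds to pixel (h=1, w=2)
    assert torch.allclose(gathered[0, 1], feat_map[0, :, 1, 2])
    return gathered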
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
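# Illustrative sketch (shapes are assumed for demonstration): ForegroundFocalLoss returns a
# scalar normalized by the number of positive targets, so this assumes at least one positive
# (num_pos == 0 would divide by zero).
def _demo_foreground_focal_loss():
    import torch
    loss_fn = ForegroundFocalLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 6, 1)
    target = torch.zeros(2, 6, 1)
    target[0, 0, 0] = 1.0                  # a single positive anchor
    loss = loss_fn(logits, target)         # 0-dim (scalar) tensor
    assert loss.dim() == 0
    return loss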
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
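# Illustrative sketch (shapes are assumed for demonstration): note the reduction semantics
# above -- 'sum' averages over the code dimension and then sums over boxes (a scalar), while
# any other value sums over boxes and returns one entry per code.
def _demo_smooth_l1_reductions():
    import torch
    x = torch.randn(4, 7)                                          # (box_num, code_size) residuals
    assert SmoothL1Loss(sigma=3.0, reduction='sum')(x).dim() == 0
    assert SmoothL1Loss(sigma=3.0, reduction='mean')(x).shape == (7,)
    return x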
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
_smooth_reg_loss
|
 Smooth L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
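# Illustrative usage sketch (shapes are assumed for demonstration): anchor-wise weighted focal
# loss; the result is element-wise, i.e. no reduction is applied.
def _demo_sigmoid_focal_classification_loss():
    import torch
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 4, 3)                 # (B, #anchors, #classes)
    targets = torch.zeros(2, 4, 3)
    targets[:, :, 0] = 1.0                        # pretend every anchor is class 0
    weights = torch.full((2, 4), 0.25)            # e.g. 1 / num_positives per sample
    loss = loss_fn(logits, targets, weights)      # (B, #anchors, #classes)
    assert loss.shape == (2, 4, 3)
    return loss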
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss, adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
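# Illustrative sketch (assumes a CUDA device): WeightedL1Loss stores code_weights on the GPU in
# __init__ and reads it unconditionally in forward, so this demo passes code_weights and only
# runs when CUDA is available. Note the result keeps the #codes dimension (no reduction).
def _demo_weighted_l1_loss():
    import torch
    if not torch.cuda.is_available():
        return None
    loss_fn = WeightedL1Loss(code_weights=[1.0] * 7)
    preds = torch.randn(2, 8, 7, device='cuda')        # (B, #anchors, #codes)
    targets = torch.randn(2, 8, 7, device='cuda')
    weights = torch.ones(2, 8, device='cuda')          # (B, #anchors)
    loss = loss_fn(preds, targets, weights)
    assert loss.shape == (2, 8, 7)
    return loss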
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
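# Illustrative sketch (shapes are assumed for demonstration): CenterNetFocalLoss expects pred to
# already be probabilities in (0, 1) (e.g. after sigmoid) and gt to be a heatmap where exactly
# 1.0 marks positive centers; everything else is down-weighted by (1 - gt) ** 4.
def _demo_centernet_focal_loss():
    import torch
    loss_fn = CenterNetFocalLoss()
    pred = torch.sigmoid(torch.randn(2, 3, 8, 8))   # (batch, classes, h, w) probabilities
    gt = torch.zeros(2, 3, 8, 8)
    gt[0, 0, 4, 4] = 1.0                            # one positive center
    loss = loss_fn(pred, gt)                        # scalar
    assert loss.dim() == 0
    return loss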
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
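# Illustrative sketch (shapes are assumed for demonstration): predictions are gathered from the
# (B, dim, H, W) map at flattened indices, masked, and the L1 loss is normalized by the number
# of valid objects; the result has one entry per regression dimension.
def _demo_centernet_reg_loss():
    import torch
    loss_fn = CenterNetRegLoss()
    output = torch.randn(2, 4, 8, 8)                 # (batch, dim, h, w)
    ind = torch.tensor([[3, 12, 0], [7, 7, 20]])     # (batch, max_objects), flattened h * W + w
    mask = torch.tensor([[1, 1, 0], [1, 0, 0]])      # valid-object mask
    target = torch.randn(2, 3, 4)                    # (batch, max_objects, dim)
    loss = loss_fn(output, mask, ind, target)
    assert loss.shape == (4,)
    return loss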
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
# MASKED: _smooth_reg_loss function (lines 410-438)
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
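# Illustrative sketch (standalone, values chosen only for demonstration): the
# sigma-parameterized smooth L1 used above, written out as a plain helper for a quick numeric
# check of the two branches, before the masking and normalization steps applied by the method.
def _demo_sigma_smooth_l1(sigma: float = 3.0):
    import torch
    diff = torch.tensor([0.05, 0.2, 1.0])
    cut = 1.0 / sigma ** 2                           # branch switch point
    quad = 0.5 * (sigma * diff) ** 2                 # |diff| <= 1 / sigma ** 2
    lin = diff.abs() - 0.5 / sigma ** 2              # otherwise
    return torch.where(diff.abs() <= cut, quad, lin)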
| 410 | 438 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss, adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
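# Illustrative usage sketch (shapes are assumed for demonstration): same focal term as the
# classes above, but with a built-in reduction and no external anchor weights.
def _demo_e2e_sigmoid_focal_loss():
    import torch
    loss_fn = E2ESigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25, reduction='sum')
    logits = torch.randn(2, 4, 3)
    targets = torch.zeros(2, 4, 3)
    targets[:, :, 1] = 1.0
    loss = loss_fn(logits, targets)     # scalar: summed over all elements
    assert loss.dim() == 0
    return loss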
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
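# Illustrative sketch (shapes are assumed for demonstration): with onehot=True the
# (N, #classes) target is argmax-ed back to class indices before calling F.cross_entropy.
def _demo_cross_entropy_loss():
    import torch
    loss_fn = CrossEntropyLoss(reduction='mean', onehot=True)
    logits = torch.randn(6, 3)
    target = torch.nn.functional.one_hot(
        torch.randint(0, 3, (6,)), num_classes=3).float()
    loss = loss_fn(logits, target)      # scalar mean loss
    assert loss.dim() == 0
    return loss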
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
__init__
|
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
    Code-wise Weighted Smooth L1 Loss, adapted from fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
            gt (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
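# Illustrative sketch (shapes are assumed for demonstration): same gather-then-regress pattern
# as CenterNetRegLoss, but with the sigma-parameterized smooth L1; forward asserts that
# sin_loss is False.
def _demo_centernet_smooth_reg_loss():
    import torch
    loss_fn = CenterNetSmoothRegLoss()
    output = torch.randn(2, 4, 8, 8)            # (batch, dim, h, w)
    ind = torch.tensor([[1, 9], [0, 63]])       # (batch, max_objects)
    mask = torch.tensor([[1, 0], [1, 1]])
    target = torch.randn(2, 2, 4)               # (batch, max_objects, dim)
    loss = loss_fn(output, mask, ind, target, sin_loss=False)
    assert loss.shape == (4,)                   # per-dimension loss
    return loss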
class E2ESigmoidFocalClassificationLoss(nn.Module):
# MASKED: __init__ function (lines 448-457)
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
| 448 | 457 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
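# --- Illustrative sketch added for this write-up; not part of the original losses.py ---
# Sanity check: the hand-written stable form max(x, 0) - x*z + log(1 + exp(-|x|)) above
# should match PyTorch's built-in binary_cross_entropy_with_logits. The shapes below are
# arbitrary demo values chosen only to exercise the (B, #anchors, #classes) layout.
_demo_logits = torch.randn(2, 4, 3)
_demo_targets = torch.randint(0, 2, (2, 4, 3)).float()
_demo_weights = torch.ones(2, 4)
_demo_manual = SigmoidFocalClassificationLoss.sigmoid_cross_entropy_with_logits(_demo_logits, _demo_targets)
_demo_builtin = F.binary_cross_entropy_with_logits(_demo_logits, _demo_targets, reduction='none')
assert torch.allclose(_demo_manual, _demo_builtin, atol=1e-6)
# Full focal loss call: per-element loss scaled by anchor-wise weights, no reduction.
_demo_focal = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)(_demo_logits, _demo_targets, _demo_weights)
assert _demo_focal.shape == (2, 4, 3)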
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
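# --- Illustrative sketch added for this write-up; not part of the original losses.py ---
# The static smooth_l1_loss is quadratic for |diff| < beta and linear beyond it.
# The numbers below are arbitrary demo values showing both branches.
_demo_diff = torch.tensor([0.05, 0.5, 2.0])
_demo_beta = 1.0 / 9.0
_demo_sl1 = WeightedSmoothL1Loss.smooth_l1_loss(_demo_diff, _demo_beta)
_demo_expected = torch.tensor([0.5 * 0.05 ** 2 / _demo_beta, 0.5 - 0.5 * _demo_beta, 2.0 - 0.5 * _demo_beta])
assert torch.allclose(_demo_sl1, _demo_expected, atol=1e-6)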
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
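# --- Illustrative note added for this write-up; not part of the original losses.py ---
# get_corner_loss_lidar compares the 8 corners of each predicted box with the GT corners
# and with the corners of the GT box rotated by pi, then keeps the smaller distance, so a
# heading that is off by exactly 180 degrees is not penalized. A hypothetical example:
# _pred = torch.tensor([[0., 0., 0., 4., 2., 1.5, 0.3]])        # (N, 7) x, y, z, dx, dy, dz, heading
# _gt = torch.tensor([[0., 0., 0., 4., 2., 1.5, 0.3 + np.pi]])  # same box, heading flipped by pi
# get_corner_loss_lidar(_pred, _gt) is then approximately zero.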
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
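# --- Illustrative note added for this write-up; not part of the original losses.py ---
# _neg_loss assumes `pred` is already a probability heatmap strictly inside (0, 1)
# (e.g. a clamped sigmoid output) and `gt` is a Gaussian-rendered heatmap whose peaks are
# exactly 1 at object centers. torch.log(pred) and torch.log(1 - pred) diverge at 0 and 1,
# so callers typically clamp first, e.g. pred = torch.clamp(torch.sigmoid(hm), 1e-4, 1 - 1e-4).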
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
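# --- Illustrative sketch added for this write-up; shapes are arbitrary demo values ---
# _transpose_and_gather_feat turns a (B, C, H, W) head output into (B, K, C) features
# sampled at K flattened H*W indices per batch element.
_demo_feat = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2, 3, 4, 5)  # (B=2, C=3, H=4, W=5)
_demo_ind = torch.tensor([[0, 7], [19, 3]])  # (B=2, K=2) flattened pixel indices
_demo_gathered = _transpose_and_gather_feat(_demo_feat, _demo_ind)
assert _demo_gathered.shape == (2, 2, 3)
# Entry [b, k, c] equals _demo_feat[b, c, ind[b, k] // W, ind[b, k] % W]:
assert _demo_gathered[0, 1, 0] == _demo_feat[0, 0, 7 // 5, 7 % 5]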
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss summed over all entries and normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss reduced with sum or mean according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
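# --- Illustrative note added for this write-up; not part of the original losses.py ---
# The else branch above returns (input - target).sum(1) * 0. when there are no boxes, so the
# result is a zero tensor that still carries a grad_fn and the backward pass does not break
# on empty batches.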
|
forward
|
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss reduced with sum or mean according to self.reduction.
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss summed over all entries and normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
# MASKED: forward function (lines 478-503)
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss reduced with sum or mean according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
| 478 | 503 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.iou3d_nms_diff.iou3d_nms_diff_utils import boxes_iou3d_gpu_differentiable
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
class CenterNetFocalLoss(nn.Module):
"""nn.Module warpper for focal loss"""
def __init__(self):
super(CenterNetFocalLoss, self).__init__()
def _neg_loss(self, pred, gt):
""" Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
return self._neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()
feat = _gather_feat(feat, ind)
return feat.contiguous()
class CenterNetRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetRegLoss, self).__init__()
def _reg_loss(self, regr, gt_regr, mask):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = self._reg_loss(pred, target, mask)
return loss
class ForegroundFocalLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(ForegroundFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss summed over all entries and normalized by the number of positive targets.
"""
num_pos = torch.sum(target)
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
loss = loss.sum() / num_pos
return loss
class CenterNetSmoothRegLoss(nn.Module):
"""Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
"""
def __init__(self):
super(CenterNetSmoothRegLoss, self).__init__()
def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):
""" L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
abs_diff = torch.abs(regr - gt_regr)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (
abs_diff - 0.5 / (sigma ** 2)
) * (1.0 - abs_diff_lt_1)
loss = loss.transpose(2, 0).contiguous()
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
loss = loss / (num + 1e-4)
return loss
def forward(self, output, mask, ind, target, sin_loss):
assert sin_loss is False
pred = _transpose_and_gather_feat(output, ind)
loss = self._smooth_reg_loss(pred, target, mask)
return loss
class E2ESigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(E2ESigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: scalar float tensor; focal loss reduced with sum or mean according to self.reduction.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class SmoothL1Loss(nn.Module):
def __init__(self, sigma, reduction='mean'):
super(SmoothL1Loss, self).__init__()
self.sigma = sigma
self.reduction = reduction
def smooth_l1_loss(self, x):
sigma2 = self.sigma ** 2
cond_point = 1 / sigma2
abs_x = torch.abs(x)
in_mask = abs_x < cond_point
out_mask = 1.0 - in_mask.type_as(x)
in_value = 0.5 * (self.sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.type_as(in_value) + out_value * out_mask.type_as(out_value)
return value
def forward(self, x):
# x size (box_num, code_size)
value = self.smooth_l1_loss(x)
if self.reduction == 'sum':
loss = value.mean(dim=1)
loss = loss.sum()
else:
loss = value.sum(dim=0)
return loss
class CrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self, reduction='mean', onehot=False):
super().__init__()
self.reduction = reduction
self.onehot = onehot
def forward(self, input: torch.Tensor, target: torch.Tensor):
if self.onehot:
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction=self.reduction)
return loss
class QualityLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25, reduction='mean'):
super(QualityLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
@staticmethod
def sigmoid_cross_entropy_with_logits(input, target):
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if self.reduction == 'sum':
return loss.sum()
else:
return loss.mean()
class IOU3DLoss(nn.Module):
def __init__(self):
super(IOU3DLoss, self).__init__()
@staticmethod
def iou3d_loss(x, y):
iou3d = boxes_iou3d_gpu_differentiable(x, y)
return 1 - iou3d
def forward(self, input, target):
input = input.contiguous()
target = target.contiguous()
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
if input.size(0) > 0:
loss = self.iou3d_loss(input, target).sum()
else:
loss = (input - target).sum(1) * 0.
return loss
|
parse_model_config_params
|
Args:
model_params:
num_settings:
random_state:
Returns:
|
import numpy as np
import yaml
from dask.distributed import Client, LocalCluster, as_completed
import argparse
from os.path import exists, join
from os import makedirs
from mlmicrophysics.data import subset_data_files_by_date, assemble_data_files
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, accuracy_score
from mlmicrophysics.metrics import hellinger_distance, heidke_skill_score, peirce_skill_score
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint, uniform, expon
import pandas as pd
import traceback
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
def sampler_generator(ps):
for params in ps:
yield params
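# --- Illustrative sketch added for this write-up; hypothetical values, not part of the original script ---
# sampler_generator simply re-yields the settings drawn by sklearn's ParameterSampler, e.g.:
# ps = ParameterSampler({"n_estimators": randint(50, 500), "max_depth": randint(2, 10)},
#                       n_iter=5, random_state=np.random.RandomState(0))
# for params in sampler_generator(ps):
#     print(params)  # one dict of sampled hyperparameters per draw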
# MASKED: parse_model_config_params function (lines 28-46)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Configuration yaml file")
parser.add_argument("-p", "--proc", type=int, default=1, help="Number of processors")
args = parser.parse_args()
if not exists(args.config):
raise FileNotFoundError(args.config + " not found.")
with open(args.config) as config_file:
config = yaml.load(config_file)
train_files, val_files, test_files = subset_data_files_by_date(config["data_path"],
config["data_end"], **config["subset_data"])
input_scaler = scalers[config["input_scaler"]]()
train_input, \
train_output_labels, \
train_transformed_output, \
train_scaled_output, \
output_scalers = assemble_data_files(train_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
subsample=config["subsample"])
print("Train Input Size:", train_input.shape)
val_input, \
val_output_labels, \
val_transformed_output, \
val_scaled_output, \
output_scalers = assemble_data_files(val_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
output_scalers=output_scalers,
train=False,
subsample=config["subsample"])
print("Val Input Size:", val_input.shape)
cluster = LocalCluster(n_workers=args.proc, threads_per_worker=1)
client = Client(cluster)
print(client)
train_input_link = client.scatter(train_input)
train_labels_link = client.scatter(train_output_labels)
train_scaled_output_link = client.scatter(train_scaled_output)
val_input_link = client.scatter(val_input)
val_output_labels_link = client.scatter(val_output_labels)
val_scaled_output_link = client.scatter(val_scaled_output)
submissions = []
if not exists(config["out_path"]):
makedirs(config["out_path"])
for class_model_name, class_model_params in config["classifier_models"].items():
for reg_model_name, reg_model_params in config["regressor_models"].items():
rs = np.random.RandomState(config["random_seed"])
class_model_config_generator = parse_model_config_params(class_model_params,
config["num_param_samples"],
rs)
reg_model_config_generator = parse_model_config_params(reg_model_params,
config["num_param_samples"],
rs)
class_model_configs = []
reg_model_configs = []
for s in range(config["num_param_samples"]):
class_model_config = next(class_model_config_generator)
reg_model_config = next(reg_model_config_generator)
class_model_configs.append(class_model_config)
reg_model_configs.append(reg_model_config)
config_index = f"{class_model_name}_{reg_model_name}_{s:04}"
submissions.append(client.submit(validate_model_configuration,
class_model_name, class_model_config,
reg_model_name, reg_model_config, config_index,
train_input_link, train_labels_link,
train_scaled_output_link,
val_input_link, val_output_labels_link,
val_scaled_output_link,
config["classifier_metrics"],
config["regressor_metrics"]))
class_config_frame = pd.DataFrame(class_model_configs)
reg_config_frame = pd.DataFrame(reg_model_configs)
class_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_classifier_params.csv"),
index_label="Config")
reg_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_regressor_params.csv"))
result_count = 0
for out in as_completed(submissions):
if out.status == "finished":
result = out.result()
print(result)
if result_count == 0:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
index_label="Config")
else:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
header=False,
mode="a")
result_count += 1
else:
tb = out.traceback()
for line in traceback.format_tb(tb):
print(line)
del submissions[:]
client.close()
cluster.close()
return
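# --- Illustrative note added for this write-up; not part of the original script ---
# main() follows the usual dask.distributed fan-out pattern: large arrays are scatter()ed once
# so every worker holds a single shared copy, one client.submit() is issued per sampled
# configuration, and as_completed() streams futures back as they finish so metrics can be
# appended to the CSV incrementally instead of waiting for the whole batch.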
def validate_model_configuration(classifier_model_name, classifier_model_config,
regressor_model_name, regressor_model_config, config_index,
train_scaled_input, train_labels, train_scaled_output,
val_scaled_input, val_labels, val_scaled_output,
classifier_metric_list, regressor_metric_list):
"""
Train a single machine learning model configuration to predict each microphysical tendency.
Args:
classifier_model_name:
classifier_model_config:
regressor_model_name:
regressor_model_config:
config_index:
train_scaled_input:
train_labels:
train_scaled_output:
val_scaled_input:
val_labels:
val_scaled_output:
classifier_metric_list:
regressor_metric_list:
Returns:
"""
from mlmicrophysics.models import DenseNeuralNetwork, DenseGAN
import keras.backend as K
metrics = {"mse": mean_squared_error,
"mae": mean_absolute_error,
"r2": r2_score,
"hellinger": hellinger_distance,
"acc": accuracy_score,
"hss": heidke_skill_score,
"pss": peirce_skill_score}
sess = K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))
K.set_session(sess)
with sess.as_default():
model_classes = {"RandomForestRegressor": RandomForestRegressor,
"RandomForestClassifier": RandomForestClassifier,
"DenseNeuralNetwork": DenseNeuralNetwork,
"DenseGAN": DenseGAN}
classifier_models = {}
regressor_models = {}
output_label_preds = pd.DataFrame(0, index=val_labels.index, columns=val_labels.columns,
dtype=np.int32)
output_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_regressor_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_metric_columns = []
for output_col in train_scaled_output.columns:
for metric in classifier_metric_list:
output_metric_columns.append(output_col + "_" + metric)
for metric in regressor_metric_list:
output_metric_columns.append(output_col + "_" + metric)
unique_labels = np.unique(train_labels[output_col])
for unique_label in unique_labels:
for metric in regressor_metric_list:
output_metric_columns.append(f"{output_col}_{unique_label}_{metric}")
output_metrics = pd.Series(index=output_metric_columns, name=config_index, dtype=np.float32)
for output_col in train_scaled_output.columns:
print(output_col)
unique_labels = np.unique(train_labels[output_col])
if unique_labels.size > 1:
if classifier_model_name in ["DenseNeuralNetwork", "DenseGAN"]:
classifier_models[output_col] = model_classes[classifier_model_name](outputs=unique_labels.size,
classifier=True,
**classifier_model_config)
else:
classifier_models[output_col] = model_classes[classifier_model_name](**classifier_model_config)
classifier_models[output_col].fit(train_scaled_input, train_labels[output_col])
output_label_preds.loc[:, output_col] = classifier_models[output_col].predict(val_scaled_input)
for metric in classifier_metric_list:
output_metrics[output_col + "_" + metric] = metrics[metric](val_labels[output_col].values,
output_label_preds[output_col].values)
else:
output_label_preds.loc[:, output_col] = unique_labels[0]
regressor_models[output_col] = {}
for label in unique_labels:
if label != 0:
if regressor_model_name in ["DenseNeuralNetwork", "DenseGAN"]:
regressor_models[output_col][label] = model_classes[regressor_model_name](classifier=False,
**regressor_model_config)
else:
regressor_models[output_col][label] = model_classes[regressor_model_name](**regressor_model_config)
regressor_models[output_col][label].fit(train_scaled_input.loc[train_labels[output_col] == label],
train_scaled_output.loc[train_labels[output_col] == label,
output_col])
if np.count_nonzero(output_label_preds[output_col] == label) > 0:
output_preds.loc[output_label_preds[output_col] == label,
output_col] = regressor_models[output_col][
label].predict(val_scaled_input.loc[output_label_preds[output_col] == label])
output_regressor_preds.loc[val_labels[output_col] == label,
output_col] = regressor_models[output_col][
label].predict(val_scaled_input.loc[val_labels[output_col] == label])
for metric in regressor_metric_list:
output_metrics[f"{output_col}_{label}_{metric}"] = metrics[metric](val_scaled_output.loc[val_labels[output_col] == label, output_col].values,
output_regressor_preds.loc[val_labels[output_col] == label, output_col].values)
for metric in regressor_metric_list:
output_metrics[output_col + "_" + metric] = metrics[metric](val_scaled_output[output_col].values,
output_preds[output_col].values)
return output_metrics
if __name__ == "__main__":
main()
|
def parse_model_config_params(model_params, num_settings, random_state):
"""
Args:
model_params:
num_settings:
random_state:
Returns:
"""
param_distributions = dict()
dist_types = dict(randint=randint, expon=expon, uniform=uniform)
for param, param_value in model_params.items():
if param_value[0] in ["randint", "expon", "uniform"]:
param_distributions[param] = dist_types[param_value[0]](*param_value[1:])
else:
param_distributions[param] = param_value
return sampler_generator(ParameterSampler(param_distributions, n_iter=num_settings, random_state=random_state))
| 28 | 46 |
import numpy as np
import yaml
from dask.distributed import Client, LocalCluster, as_completed
import argparse
from os.path import exists, join
from os import makedirs
from mlmicrophysics.data import subset_data_files_by_date, assemble_data_files
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, accuracy_score
from mlmicrophysics.metrics import hellinger_distance, heidke_skill_score, peirce_skill_score
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint, uniform, expon
import pandas as pd
import traceback
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
def sampler_generator(ps):
for params in ps:
yield params
def parse_model_config_params(model_params, num_settings, random_state):
"""
Args:
model_params:
num_settings:
random_state:
Returns:
"""
param_distributions = dict()
dist_types = dict(randint=randint, expon=expon, uniform=uniform)
for param, param_value in model_params.items():
if param_value[0] in ["randint", "expon", "uniform"]:
param_distributions[param] = dist_types[param_value[0]](*param_value[1:])
else:
param_distributions[param] = param_value
return sampler_generator(ParameterSampler(param_distributions, n_iter=num_settings, random_state=random_state))
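# Usage sketch for parse_model_config_params (the parameter names and values here
# are hypothetical, not taken from the original configuration file): each entry in
# model_params is either a distribution spec such as ["randint", 100, 500] or a
# plain list of fixed choices, and the returned generator yields sampled dicts.
#
#     params = {"n_estimators": ["randint", 100, 500],
#               "max_features": ["sqrt", "log2"]}
#     gen = parse_model_config_params(params, num_settings=5,
#                                     random_state=np.random.RandomState(0))
#     first_config = next(gen)  # e.g. {"max_features": "sqrt", "n_estimators": 374}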
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Configuration yaml file")
parser.add_argument("-p", "--proc", type=int, default=1, help="Number of processors")
args = parser.parse_args()
if not exists(args.config):
raise FileNotFoundError(args.config + " not found.")
with open(args.config) as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
train_files, val_files, test_files = subset_data_files_by_date(config["data_path"],
config["data_end"], **config["subset_data"])
input_scaler = scalers[config["input_scaler"]]()
train_input, \
train_output_labels, \
train_transformed_output, \
train_scaled_output, \
output_scalers = assemble_data_files(train_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
subsample=config["subsample"])
print("Train Input Size:", train_input.shape)
val_input, \
val_output_labels, \
val_transformed_output, \
val_scaled_output, \
output_scalers = assemble_data_files(val_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
output_scalers=output_scalers,
train=False,
subsample=config["subsample"])
print("Val Input Size:", val_input.shape)
cluster = LocalCluster(n_workers=args.proc, threads_per_worker=1)
client = Client(cluster)
print(client)
train_input_link = client.scatter(train_input)
train_labels_link = client.scatter(train_output_labels)
train_scaled_output_link = client.scatter(train_scaled_output)
val_input_link = client.scatter(val_input)
val_output_labels_link = client.scatter(val_output_labels)
val_scaled_output_link = client.scatter(val_scaled_output)
submissions = []
if not exists(config["out_path"]):
makedirs(config["out_path"])
for class_model_name, class_model_params in config["classifier_models"].items():
for reg_model_name, reg_model_params in config["regressor_models"].items():
rs = np.random.RandomState(config["random_seed"])
class_model_config_generator = parse_model_config_params(class_model_params,
config["num_param_samples"],
rs)
reg_model_config_generator = parse_model_config_params(reg_model_params,
config["num_param_samples"],
rs)
class_model_configs = []
reg_model_configs = []
for s in range(config["num_param_samples"]):
class_model_config = next(class_model_config_generator)
reg_model_config = next(reg_model_config_generator)
class_model_configs.append(class_model_config)
reg_model_configs.append(reg_model_config)
config_index = f"{class_model_name}_{reg_model_name}_{s:04}"
submissions.append(client.submit(validate_model_configuration,
class_model_name, class_model_config,
reg_model_name, reg_model_config, config_index,
train_input_link, train_labels_link,
train_scaled_output_link,
val_input_link, val_output_labels_link,
val_scaled_output_link,
config["classifier_metrics"],
config["regressor_metrics"]))
class_config_frame = pd.DataFrame(class_model_configs)
reg_config_frame = pd.DataFrame(reg_model_configs)
class_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_classifier_params.csv"),
index_label="Config")
reg_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_regressor_params.csv"))
result_count = 0
for out in as_completed(submissions):
if out.status == "finished":
result = out.result()
print(result)
if result_count == 0:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
index_label="Config")
else:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
header=False,
mode="a")
result_count += 1
else:
tb = out.traceback()
for line in traceback.format_tb(tb):
print(line)
del submissions[:]
client.close()
cluster.close()
return
def validate_model_configuration(classifier_model_name, classifier_model_config,
regressor_model_name, regressor_model_config, config_index,
train_scaled_input, train_labels, train_scaled_output,
val_scaled_input, val_labels, val_scaled_output,
classifier_metric_list, regressor_metric_list):
"""
Train a single machine learning model configuration to predict each microphysical tendency.
Args:
classifier_model_name:
classifier_model_config:
regressor_model_name:
regressor_model_config:
config_index:
train_scaled_input:
train_labels:
train_scaled_output:
val_scaled_input:
val_labels:
val_scaled_output:
classifier_metric_list:
regressor_metric_list:
Returns:
"""
from mlmicrophysics.models import DenseNeuralNetwork, DenseGAN
import keras.backend as K
metrics = {"mse": mean_squared_error,
"mae": mean_absolute_error,
"r2": r2_score,
"hellinger": hellinger_distance,
"acc": accuracy_score,
"hss": heidke_skill_score,
"pss": peirce_skill_score}
sess = K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))
K.set_session(sess)
with sess.as_default():
model_classes = {"RandomForestRegressor": RandomForestRegressor,
"RandomForestClassifier": RandomForestClassifier,
"DenseNeuralNetwork": DenseNeuralNetwork,
"DenseGAN": DenseGAN}
classifier_models = {}
regressor_models = {}
output_label_preds = pd.DataFrame(0, index=val_labels.index, columns=val_labels.columns,
dtype=np.int32)
output_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_regressor_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_metric_columns = []
for output_col in train_scaled_output.columns:
for metric in classifier_metric_list:
output_metric_columns.append(output_col + "_" + metric)
for metric in regressor_metric_list:
output_metric_columns.append(output_col + "_" + metric)
unique_labels = np.unique(train_labels[output_col])
for unique_label in unique_labels:
for metric in regressor_metric_list:
output_metric_columns.append(f"{output_col}_{unique_label}_{metric}")
output_metrics = pd.Series(index=output_metric_columns, name=config_index, dtype=np.float32)
for output_col in train_scaled_output.columns:
print(output_col)
unique_labels = np.unique(train_labels[output_col])
if unique_labels.size > 1:
if classifier_model_name in ["DenseNeuralNetwork", "DenseGAN"]:
classifier_models[output_col] = model_classes[classifier_model_name](outputs=unique_labels.size,
classifier=True,
**classifier_model_config)
else:
classifier_models[output_col] = model_classes[classifier_model_name](**classifier_model_config)
classifier_models[output_col].fit(train_scaled_input, train_labels[output_col])
output_label_preds.loc[:, output_col] = classifier_models[output_col].predict(val_scaled_input)
for metric in classifier_metric_list:
output_metrics[output_col + "_" + metric] = metrics[metric](val_labels[output_col].values,
output_label_preds[output_col].values)
else:
output_label_preds.loc[:, output_col] = unique_labels[0]
regressor_models[output_col] = {}
for label in unique_labels:
if label != 0:
if regressor_model_name in ["DenseNeuralNetwork", "DenseGAN"]:
regressor_models[output_col][label] = model_classes[regressor_model_name](classifier=False,
**regressor_model_config)
else:
regressor_models[output_col][label] = model_classes[regressor_model_name](**regressor_model_config)
regressor_models[output_col][label].fit(train_scaled_input.loc[train_labels[output_col] == label],
train_scaled_output.loc[train_labels[output_col] == label,
output_col])
if np.count_nonzero(output_label_preds[output_col] == label) > 0:
output_preds.loc[output_label_preds[output_col] == label,
output_col] = regressor_models[output_col][
label].predict(val_scaled_input.loc[output_label_preds[output_col] == label])
output_regressor_preds.loc[val_labels[output_col] == label,
output_col] = regressor_models[output_col][
label].predict(val_scaled_input.loc[val_labels[output_col] == label])
for metric in regressor_metric_list:
output_metrics[f"{output_col}_{label}_{metric}"] = metrics[metric](val_scaled_output.loc[val_labels[output_col] == label, output_col].values,
output_regressor_preds.loc[val_labels[output_col] == label, output_col].values)
for metric in regressor_metric_list:
output_metrics[output_col + "_" + metric] = metrics[metric](val_scaled_output[output_col].values,
output_preds[output_col].values)
return output_metrics
if __name__ == "__main__":
main()
|
test_with_double_quoted_multiple_words
|
Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored.
|
from __future__ import absolute_import, unicode_literals
from unittest import TestCase as UnitTestCase
import django
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.test import TestCase, TransactionTestCase
from django.utils.encoding import force_text
from .forms import CustomPKFoodForm, DirectFoodForm, FoodForm, OfficialFoodForm
from .models import (Article, Child, CustomManager, CustomPKFood,
CustomPKHousePet, CustomPKPet, DirectFood,
DirectHousePet, DirectPet, Food, HousePet, Movie,
OfficialFood, OfficialHousePet, OfficialPet,
OfficialTag, OfficialThroughModel, Pet, Photo,
TaggedCustomPKFood, TaggedCustomPKPet, TaggedFood,
TaggedPet)
from taggit.managers import _model_name, _TaggableManager, TaggableManager
from taggit.models import Tag, TaggedItem
from taggit.utils import edit_string_for_tags, parse_tags
try:
from unittest import skipIf, skipUnless
except ImportError:
from django.utils.unittest import skipIf, skipUnless
class BaseTaggingTest(object):
def assert_tags_equal(self, qs, tags, sort=True, attr="name"):
got = [getattr(obj, attr) for obj in qs]
if sort:
got.sort()
tags.sort()
self.assertEqual(got, tags)
def _get_form_str(self, form_str):
if django.VERSION >= (1, 3):
form_str %= {
"help_start": '<span class="helptext">',
"help_stop": "</span>"
}
else:
form_str %= {
"help_start": "",
"help_stop": ""
}
return form_str
def assert_form_renders(self, form, html):
self.assertHTMLEqual(str(form), self._get_form_str(html))
class BaseTaggingTestCase(TestCase, BaseTaggingTest):
pass
class BaseTaggingTransactionTestCase(TransactionTestCase, BaseTaggingTest):
pass
class TagModelTestCase(BaseTaggingTransactionTestCase):
food_model = Food
tag_model = Tag
def test_unique_slug(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("Red", "red")
def test_update(self):
special = self.tag_model.objects.create(name="special")
special.save()
def test_add(self):
apple = self.food_model.objects.create(name="apple")
yummy = self.tag_model.objects.create(name="yummy")
apple.tags.add(yummy)
def test_slugify(self):
a = Article.objects.create(title="django-taggit 1.0 Released")
a.tags.add("awesome", "release", "AWESOME")
self.assert_tags_equal(a.tags.all(), [
"category-awesome",
"category-release",
"category-awesome-1"
], attr="slug")
def test_integers(self):
"""Adding an integer as a tag should raise a ValueError (#237)."""
apple = self.food_model.objects.create(name="apple")
with self.assertRaisesRegexp(ValueError, (
r"Cannot add 1 \(<(type|class) 'int'>\). "
r"Expected <class 'django.db.models.base.ModelBase'> or str.")):
apple.tags.add(1)
class TagModelDirectTestCase(TagModelTestCase):
food_model = DirectFood
tag_model = Tag
class TagModelCustomPKTestCase(TagModelTestCase):
food_model = CustomPKFood
tag_model = Tag
class TagModelOfficialTestCase(TagModelTestCase):
food_model = OfficialFood
tag_model = OfficialTag
class TaggableManagerTestCase(BaseTaggingTestCase):
food_model = Food
pet_model = Pet
housepet_model = HousePet
taggeditem_model = TaggedItem
tag_model = Tag
def test_add_tag(self):
apple = self.food_model.objects.create(name="apple")
self.assertEqual(list(apple.tags.all()), [])
self.assertEqual(list(self.food_model.tags.all()), [])
apple.tags.add('green')
self.assert_tags_equal(apple.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
pear = self.food_model.objects.create(name="pear")
pear.tags.add('green')
self.assert_tags_equal(pear.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
apple.tags.add('red')
self.assert_tags_equal(apple.tags.all(), ['green', 'red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
self.assert_tags_equal(
self.food_model.tags.most_common(),
['green', 'red'],
sort=False
)
apple.tags.remove('green')
self.assert_tags_equal(apple.tags.all(), ['red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
tag = self.tag_model.objects.create(name="delicious")
apple.tags.add(tag)
self.assert_tags_equal(apple.tags.all(), ["red", "delicious"])
apple.delete()
self.assert_tags_equal(self.food_model.tags.all(), ["green"])
def test_add_queries(self):
# Prefill content type cache:
ContentType.objects.get_for_model(self.food_model)
apple = self.food_model.objects.create(name="apple")
# 1 query to see which tags exist
# + 3 queries to create the tags.
# + 6 queries to create the intermediary things (including SELECTs, to
        # make sure we don't double create).
# + 12 on Django 1.6 for save points.
queries = 22
if django.VERSION < (1, 6):
queries -= 12
self.assertNumQueries(queries, apple.tags.add, "red", "delicious", "green")
pear = self.food_model.objects.create(name="pear")
# 1 query to see which tags exist
        # + 4 queries to create the intermediary things (including SELECTs, to
        # make sure we don't double create).
# + 4 on Django 1.6 for save points.
queries = 9
if django.VERSION < (1, 6):
queries -= 4
self.assertNumQueries(queries, pear.tags.add, "green", "delicious")
self.assertNumQueries(0, pear.tags.add)
def test_require_pk(self):
food_instance = self.food_model()
self.assertRaises(ValueError, lambda: food_instance.tags.all())
def test_delete_obj(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red")
self.assert_tags_equal(apple.tags.all(), ["red"])
strawberry = self.food_model.objects.create(name="strawberry")
strawberry.tags.add("red")
apple.delete()
self.assert_tags_equal(strawberry.tags.all(), ["red"])
def test_delete_bulk(self):
apple = self.food_model.objects.create(name="apple")
kitty = self.pet_model.objects.create(pk=apple.pk, name="kitty")
apple.tags.add("red", "delicious", "fruit")
kitty.tags.add("feline")
self.food_model.objects.all().delete()
self.assert_tags_equal(kitty.tags.all(), ["feline"])
def test_lookup_by_tag(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"])),
[apple]
)
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["green"])),
[apple, pear]
)
kitty = self.pet_model.objects.create(name="kitty")
kitty.tags.add("fuzzy", "red")
dog = self.pet_model.objects.create(name="dog")
dog.tags.add("woof", "red")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"]).distinct()),
[apple]
)
tag = self.tag_model.objects.get(name="woof")
self.assertEqual(list(self.pet_model.objects.filter(tags__in=[tag])), [dog])
cat = self.housepet_model.objects.create(name="cat", trained=True)
cat.tags.add("fuzzy")
pks = self.pet_model.objects.filter(tags__name__in=["fuzzy"])
model_name = self.pet_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: kitty>'.format(model_name),
'<{0}: cat>'.format(model_name)],
ordered=False)
def test_lookup_bulk(self):
apple = self.food_model.objects.create(name="apple")
pear = self.food_model.objects.create(name="pear")
apple.tags.add('fruit', 'green')
pear.tags.add('fruit', 'yummie')
def lookup_qs():
# New fix: directly allow WHERE object_id IN (SELECT id FROM ..)
objects = self.food_model.objects.all()
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
def lookup_list():
# Simulate old situation: iterate over a list.
objects = list(self.food_model.objects.all())
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
self.assertNumQueries(1, lookup_qs)
self.assertNumQueries(2, lookup_list)
def test_exclude(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green", "delicious")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "delicious")
self.food_model.objects.create(name="guava")
pks = self.food_model.objects.exclude(tags__name__in=["red"])
model_name = self.food_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: pear>'.format(model_name),
'<{0}: guava>'.format(model_name)],
ordered=False)
def test_similarity_by_tag(self):
"""Test that pears are more similar to apples than watermelons"""
apple = self.food_model.objects.create(name="apple")
apple.tags.add("green", "juicy", "small", "sour")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "juicy", "small", "sweet")
watermelon = self.food_model.objects.create(name="watermelon")
watermelon.tags.add("green", "juicy", "large", "sweet")
similar_objs = apple.tags.similar_objects()
self.assertEqual(similar_objs, [pear, watermelon])
self.assertEqual([obj.similar_tags for obj in similar_objs],
[3, 2])
def test_tag_reuse(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy", "juicy")
self.assert_tags_equal(apple.tags.all(), ['juicy'])
def test_query_traverse(self):
spot = self.pet_model.objects.create(name='Spot')
spike = self.pet_model.objects.create(name='Spike')
spot.tags.add('scary')
spike.tags.add('fluffy')
lookup_kwargs = {
'%s__name' % _model_name(self.pet_model): 'Spot'
}
self.assert_tags_equal(
self.tag_model.objects.filter(**lookup_kwargs),
['scary']
)
def test_taggeditem_unicode(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy")
self.assertEqual(
force_text(self.taggeditem_model.objects.all()[0]),
"apple tagged with juicy"
)
def test_abstract_subclasses(self):
p = Photo.objects.create()
p.tags.add("outdoors", "pretty")
self.assert_tags_equal(
p.tags.all(),
["outdoors", "pretty"]
)
m = Movie.objects.create()
m.tags.add("hd")
self.assert_tags_equal(
m.tags.all(),
["hd"],
)
def test_field_api(self):
# Check if tag field, which simulates m2m, has django-like api.
field = self.food_model._meta.get_field('tags')
self.assertTrue(hasattr(field, 'rel'))
self.assertTrue(hasattr(field.rel, 'to'))
self.assertTrue(hasattr(field, 'related'))
self.assertEqual(self.food_model, field.related.model)
def test_names_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green')
apple.tags.add('red')
self.assertEqual(list(apple.tags.names()), ['green', 'red'])
def test_slugs_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green and juicy')
apple.tags.add('red')
self.assertEqual(list(apple.tags.slugs()), ['green-and-juicy', 'red'])
def test_serializes(self):
apple = self.food_model.objects.create(name="apple")
serializers.serialize("json", (apple,))
def test_prefetch_related(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('1', '2')
orange = self.food_model.objects.create(name="orange")
orange.tags.add('2', '4')
with self.assertNumQueries(2):
l = list(self.food_model.objects.prefetch_related('tags').all())
with self.assertNumQueries(0):
foods = dict((f.name, set(t.name for t in f.tags.all())) for f in l)
self.assertEqual(foods, {
'orange': set(['2', '4']),
'apple': set(['1', '2'])
})
class TaggableManagerDirectTestCase(TaggableManagerTestCase):
food_model = DirectFood
pet_model = DirectPet
housepet_model = DirectHousePet
taggeditem_model = TaggedFood
class TaggableManagerCustomPKTestCase(TaggableManagerTestCase):
food_model = CustomPKFood
pet_model = CustomPKPet
housepet_model = CustomPKHousePet
taggeditem_model = TaggedCustomPKFood
def test_require_pk(self):
# TODO with a charfield pk, pk is never None, so taggit has no way to
# tell if the instance is saved or not
pass
class TaggableManagerOfficialTestCase(TaggableManagerTestCase):
food_model = OfficialFood
pet_model = OfficialPet
housepet_model = OfficialHousePet
taggeditem_model = OfficialThroughModel
tag_model = OfficialTag
def test_extra_fields(self):
self.tag_model.objects.create(name="red")
self.tag_model.objects.create(name="delicious", official=True)
apple = self.food_model.objects.create(name="apple")
apple.tags.add("delicious", "red")
pear = self.food_model.objects.create(name="Pear")
pear.tags.add("delicious")
self.assertEqual(apple, self.food_model.objects.get(tags__official=False))
class TaggableManagerInitializationTestCase(TaggableManagerTestCase):
"""Make sure manager override defaults and sets correctly."""
food_model = Food
custom_manager_model = CustomManager
def test_default_manager(self):
self.assertEqual(self.food_model.tags.__class__, _TaggableManager)
def test_custom_manager(self):
self.assertEqual(self.custom_manager_model.tags.__class__, CustomManager.Foo)
class TaggableFormTestCase(BaseTaggingTestCase):
form_class = FoodForm
food_model = Food
def test_form(self):
self.assertEqual(list(self.form_class.base_fields), ['name', 'tags'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy'})
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy, delicious'}, instance=apple)
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy', 'delicious'])
self.assertEqual(self.food_model.objects.count(), 1)
f = self.form_class({"name": "raspberry"})
self.assertFalse(f.is_valid())
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has,comma')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has space')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has space", "has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
def test_formfield(self):
tm = TaggableManager(verbose_name='categories', help_text='Add some categories', blank=True)
ff = tm.formfield()
self.assertEqual(ff.label, 'Categories')
self.assertEqual(ff.help_text, 'Add some categories')
self.assertEqual(ff.required, False)
self.assertEqual(ff.clean(""), [])
tm = TaggableManager()
ff = tm.formfield()
self.assertRaises(ValidationError, ff.clean, "")
class TaggableFormDirectTestCase(TaggableFormTestCase):
form_class = DirectFoodForm
food_model = DirectFood
class TaggableFormCustomPKTestCase(TaggableFormTestCase):
form_class = CustomPKFoodForm
food_model = CustomPKFood
class TaggableFormOfficialTestCase(TaggableFormTestCase):
form_class = OfficialFoodForm
food_model = OfficialFood
class TagStringParseTestCase(UnitTestCase):
"""
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
def test_with_simple_space_delimited_tags(self):
"""
Test with simple space-delimited tags.
"""
self.assertEqual(parse_tags('one'), ['one'])
self.assertEqual(parse_tags('one two'), ['one', 'two'])
self.assertEqual(parse_tags('one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('one one two two'), ['one', 'two'])
def test_with_comma_delimited_multiple_words(self):
"""
Test with comma-delimited multiple words.
An unquoted comma in the input will trigger this.
"""
self.assertEqual(parse_tags(',one'), ['one'])
self.assertEqual(parse_tags(',one two'), ['one two'])
self.assertEqual(parse_tags(',one two three'), ['one two three'])
self.assertEqual(parse_tags('a-one, a-two and a-three'),
['a-one', 'a-two and a-three'])
# MASKED: test_with_double_quoted_multiple_words function (lines 506-516)
def test_with_no_loose_commas(self):
"""
Test with no loose commas -- split on spaces.
"""
self.assertEqual(parse_tags('one two "thr,ee"'), ['one', 'thr,ee', 'two'])
def test_with_loose_commas(self):
"""
Loose commas - split on commas
"""
self.assertEqual(parse_tags('"one", two three'), ['one', 'two three'])
def test_tags_with_double_quotes_can_contain_commas(self):
"""
Double quotes can contain commas
"""
self.assertEqual(parse_tags('a-one "a-two, and a-three"'),
['a-one', 'a-two, and a-three'])
self.assertEqual(parse_tags('"two", one, one, two, "one"'),
['one', 'two'])
def test_with_naughty_input(self):
"""
Test with naughty input.
"""
# Bad users! Naughty users!
self.assertEqual(parse_tags(None), [])
self.assertEqual(parse_tags(''), [])
self.assertEqual(parse_tags('"'), [])
self.assertEqual(parse_tags('""'), [])
self.assertEqual(parse_tags('"' * 7), [])
self.assertEqual(parse_tags(',,,,,,'), [])
self.assertEqual(parse_tags('",",",",",",","'), [','])
self.assertEqual(parse_tags('a-one "a-two" and "a-three'),
['a-one', 'a-three', 'a-two', 'and'])
def test_recreation_of_tag_list_string_representations(self):
plain = Tag.objects.create(name='plain')
spaces = Tag.objects.create(name='spa ces')
comma = Tag.objects.create(name='com,ma')
self.assertEqual(edit_string_for_tags([plain]), 'plain')
self.assertEqual(edit_string_for_tags([plain, spaces]), '"spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, spaces, comma]), '"com,ma", "spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, comma]), '"com,ma", plain')
self.assertEqual(edit_string_for_tags([comma, spaces]), '"com,ma", "spa ces"')
@skipIf(django.VERSION < (1, 7), "not relevant for Django < 1.7")
class DeconstructTestCase(UnitTestCase):
def test_deconstruct_kwargs_kept(self):
instance = TaggableManager(through=OfficialThroughModel, to='dummy.To')
name, path, args, kwargs = instance.deconstruct()
new_instance = TaggableManager(*args, **kwargs)
self.assertEqual('tests.OfficialThroughModel', new_instance.rel.through)
self.assertEqual('dummy.To', new_instance.rel.to)
@skipUnless(django.VERSION < (1, 7), "test only applies to 1.6 and below")
class SouthSupportTests(TestCase):
def test_import_migrations_module(self):
try:
from taggit.migrations import __doc__ # noqa
except ImproperlyConfigured as e:
exception = e
self.assertIn("SOUTH_MIGRATION_MODULES", exception.args[0])
class InheritedPrefetchTests(TestCase):
def test_inherited_tags_with_prefetch(self):
child = Child()
child.save()
child.tags.add('tag 1', 'tag 2', 'tag 3', 'tag 4')
child = Child.objects.get()
no_prefetch_tags = child.tags.all()
self.assertEquals(4, no_prefetch_tags.count())
child = Child.objects.prefetch_related('tags').get()
prefetch_tags = child.tags.all()
self.assertEquals(4, prefetch_tags.count())
self.assertEquals(set([t.name for t in no_prefetch_tags]),
set([t.name for t in prefetch_tags]))
|
def test_with_double_quoted_multiple_words(self):
"""
Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored.
"""
self.assertEqual(parse_tags('"one'), ['one'])
self.assertEqual(parse_tags('"one two'), ['one', 'two'])
self.assertEqual(parse_tags('"one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('"one two"'), ['one two'])
self.assertEqual(parse_tags('a-one "a-two and a-three"'),
['a-one', 'a-two and a-three'])
| 506 | 516 |
from __future__ import absolute_import, unicode_literals
from unittest import TestCase as UnitTestCase
import django
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.test import TestCase, TransactionTestCase
from django.utils.encoding import force_text
from .forms import CustomPKFoodForm, DirectFoodForm, FoodForm, OfficialFoodForm
from .models import (Article, Child, CustomManager, CustomPKFood,
CustomPKHousePet, CustomPKPet, DirectFood,
DirectHousePet, DirectPet, Food, HousePet, Movie,
OfficialFood, OfficialHousePet, OfficialPet,
OfficialTag, OfficialThroughModel, Pet, Photo,
TaggedCustomPKFood, TaggedCustomPKPet, TaggedFood,
TaggedPet)
from taggit.managers import _model_name, _TaggableManager, TaggableManager
from taggit.models import Tag, TaggedItem
from taggit.utils import edit_string_for_tags, parse_tags
try:
from unittest import skipIf, skipUnless
except ImportError:
from django.utils.unittest import skipIf, skipUnless
class BaseTaggingTest(object):
def assert_tags_equal(self, qs, tags, sort=True, attr="name"):
got = [getattr(obj, attr) for obj in qs]
if sort:
got.sort()
tags.sort()
self.assertEqual(got, tags)
def _get_form_str(self, form_str):
if django.VERSION >= (1, 3):
form_str %= {
"help_start": '<span class="helptext">',
"help_stop": "</span>"
}
else:
form_str %= {
"help_start": "",
"help_stop": ""
}
return form_str
def assert_form_renders(self, form, html):
self.assertHTMLEqual(str(form), self._get_form_str(html))
class BaseTaggingTestCase(TestCase, BaseTaggingTest):
pass
class BaseTaggingTransactionTestCase(TransactionTestCase, BaseTaggingTest):
pass
class TagModelTestCase(BaseTaggingTransactionTestCase):
food_model = Food
tag_model = Tag
def test_unique_slug(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("Red", "red")
def test_update(self):
special = self.tag_model.objects.create(name="special")
special.save()
def test_add(self):
apple = self.food_model.objects.create(name="apple")
yummy = self.tag_model.objects.create(name="yummy")
apple.tags.add(yummy)
def test_slugify(self):
a = Article.objects.create(title="django-taggit 1.0 Released")
a.tags.add("awesome", "release", "AWESOME")
self.assert_tags_equal(a.tags.all(), [
"category-awesome",
"category-release",
"category-awesome-1"
], attr="slug")
def test_integers(self):
"""Adding an integer as a tag should raise a ValueError (#237)."""
apple = self.food_model.objects.create(name="apple")
with self.assertRaisesRegexp(ValueError, (
r"Cannot add 1 \(<(type|class) 'int'>\). "
r"Expected <class 'django.db.models.base.ModelBase'> or str.")):
apple.tags.add(1)
class TagModelDirectTestCase(TagModelTestCase):
food_model = DirectFood
tag_model = Tag
class TagModelCustomPKTestCase(TagModelTestCase):
food_model = CustomPKFood
tag_model = Tag
class TagModelOfficialTestCase(TagModelTestCase):
food_model = OfficialFood
tag_model = OfficialTag
class TaggableManagerTestCase(BaseTaggingTestCase):
food_model = Food
pet_model = Pet
housepet_model = HousePet
taggeditem_model = TaggedItem
tag_model = Tag
def test_add_tag(self):
apple = self.food_model.objects.create(name="apple")
self.assertEqual(list(apple.tags.all()), [])
self.assertEqual(list(self.food_model.tags.all()), [])
apple.tags.add('green')
self.assert_tags_equal(apple.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
pear = self.food_model.objects.create(name="pear")
pear.tags.add('green')
self.assert_tags_equal(pear.tags.all(), ['green'])
self.assert_tags_equal(self.food_model.tags.all(), ['green'])
apple.tags.add('red')
self.assert_tags_equal(apple.tags.all(), ['green', 'red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
self.assert_tags_equal(
self.food_model.tags.most_common(),
['green', 'red'],
sort=False
)
apple.tags.remove('green')
self.assert_tags_equal(apple.tags.all(), ['red'])
self.assert_tags_equal(self.food_model.tags.all(), ['green', 'red'])
tag = self.tag_model.objects.create(name="delicious")
apple.tags.add(tag)
self.assert_tags_equal(apple.tags.all(), ["red", "delicious"])
apple.delete()
self.assert_tags_equal(self.food_model.tags.all(), ["green"])
def test_add_queries(self):
# Prefill content type cache:
ContentType.objects.get_for_model(self.food_model)
apple = self.food_model.objects.create(name="apple")
# 1 query to see which tags exist
# + 3 queries to create the tags.
# + 6 queries to create the intermediary things (including SELECTs, to
        # make sure we don't double create).
# + 12 on Django 1.6 for save points.
queries = 22
if django.VERSION < (1, 6):
queries -= 12
self.assertNumQueries(queries, apple.tags.add, "red", "delicious", "green")
pear = self.food_model.objects.create(name="pear")
# 1 query to see which tags exist
        # + 4 queries to create the intermediary things (including SELECTs, to
        # make sure we don't double create).
# + 4 on Django 1.6 for save points.
queries = 9
if django.VERSION < (1, 6):
queries -= 4
self.assertNumQueries(queries, pear.tags.add, "green", "delicious")
self.assertNumQueries(0, pear.tags.add)
def test_require_pk(self):
food_instance = self.food_model()
self.assertRaises(ValueError, lambda: food_instance.tags.all())
def test_delete_obj(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red")
self.assert_tags_equal(apple.tags.all(), ["red"])
strawberry = self.food_model.objects.create(name="strawberry")
strawberry.tags.add("red")
apple.delete()
self.assert_tags_equal(strawberry.tags.all(), ["red"])
def test_delete_bulk(self):
apple = self.food_model.objects.create(name="apple")
kitty = self.pet_model.objects.create(pk=apple.pk, name="kitty")
apple.tags.add("red", "delicious", "fruit")
kitty.tags.add("feline")
self.food_model.objects.all().delete()
self.assert_tags_equal(kitty.tags.all(), ["feline"])
def test_lookup_by_tag(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"])),
[apple]
)
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["green"])),
[apple, pear]
)
kitty = self.pet_model.objects.create(name="kitty")
kitty.tags.add("fuzzy", "red")
dog = self.pet_model.objects.create(name="dog")
dog.tags.add("woof", "red")
self.assertEqual(
list(self.food_model.objects.filter(tags__name__in=["red"]).distinct()),
[apple]
)
tag = self.tag_model.objects.get(name="woof")
self.assertEqual(list(self.pet_model.objects.filter(tags__in=[tag])), [dog])
cat = self.housepet_model.objects.create(name="cat", trained=True)
cat.tags.add("fuzzy")
pks = self.pet_model.objects.filter(tags__name__in=["fuzzy"])
model_name = self.pet_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: kitty>'.format(model_name),
'<{0}: cat>'.format(model_name)],
ordered=False)
def test_lookup_bulk(self):
apple = self.food_model.objects.create(name="apple")
pear = self.food_model.objects.create(name="pear")
apple.tags.add('fruit', 'green')
pear.tags.add('fruit', 'yummie')
def lookup_qs():
# New fix: directly allow WHERE object_id IN (SELECT id FROM ..)
objects = self.food_model.objects.all()
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
def lookup_list():
# Simulate old situation: iterate over a list.
objects = list(self.food_model.objects.all())
lookup = self.taggeditem_model.bulk_lookup_kwargs(objects)
list(self.taggeditem_model.objects.filter(**lookup))
self.assertNumQueries(1, lookup_qs)
self.assertNumQueries(2, lookup_list)
def test_exclude(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("red", "green", "delicious")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "delicious")
self.food_model.objects.create(name="guava")
pks = self.food_model.objects.exclude(tags__name__in=["red"])
model_name = self.food_model.__name__
self.assertQuerysetEqual(pks,
['<{0}: pear>'.format(model_name),
'<{0}: guava>'.format(model_name)],
ordered=False)
def test_similarity_by_tag(self):
"""Test that pears are more similar to apples than watermelons"""
apple = self.food_model.objects.create(name="apple")
apple.tags.add("green", "juicy", "small", "sour")
pear = self.food_model.objects.create(name="pear")
pear.tags.add("green", "juicy", "small", "sweet")
watermelon = self.food_model.objects.create(name="watermelon")
watermelon.tags.add("green", "juicy", "large", "sweet")
similar_objs = apple.tags.similar_objects()
self.assertEqual(similar_objs, [pear, watermelon])
self.assertEqual([obj.similar_tags for obj in similar_objs],
[3, 2])
def test_tag_reuse(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy", "juicy")
self.assert_tags_equal(apple.tags.all(), ['juicy'])
def test_query_traverse(self):
spot = self.pet_model.objects.create(name='Spot')
spike = self.pet_model.objects.create(name='Spike')
spot.tags.add('scary')
spike.tags.add('fluffy')
lookup_kwargs = {
'%s__name' % _model_name(self.pet_model): 'Spot'
}
self.assert_tags_equal(
self.tag_model.objects.filter(**lookup_kwargs),
['scary']
)
def test_taggeditem_unicode(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add("juicy")
self.assertEqual(
force_text(self.taggeditem_model.objects.all()[0]),
"apple tagged with juicy"
)
def test_abstract_subclasses(self):
p = Photo.objects.create()
p.tags.add("outdoors", "pretty")
self.assert_tags_equal(
p.tags.all(),
["outdoors", "pretty"]
)
m = Movie.objects.create()
m.tags.add("hd")
self.assert_tags_equal(
m.tags.all(),
["hd"],
)
def test_field_api(self):
# Check if tag field, which simulates m2m, has django-like api.
field = self.food_model._meta.get_field('tags')
self.assertTrue(hasattr(field, 'rel'))
self.assertTrue(hasattr(field.rel, 'to'))
self.assertTrue(hasattr(field, 'related'))
self.assertEqual(self.food_model, field.related.model)
def test_names_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green')
apple.tags.add('red')
self.assertEqual(list(apple.tags.names()), ['green', 'red'])
def test_slugs_method(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('green and juicy')
apple.tags.add('red')
self.assertEqual(list(apple.tags.slugs()), ['green-and-juicy', 'red'])
def test_serializes(self):
apple = self.food_model.objects.create(name="apple")
serializers.serialize("json", (apple,))
def test_prefetch_related(self):
apple = self.food_model.objects.create(name="apple")
apple.tags.add('1', '2')
orange = self.food_model.objects.create(name="orange")
orange.tags.add('2', '4')
with self.assertNumQueries(2):
l = list(self.food_model.objects.prefetch_related('tags').all())
with self.assertNumQueries(0):
foods = dict((f.name, set(t.name for t in f.tags.all())) for f in l)
self.assertEqual(foods, {
'orange': set(['2', '4']),
'apple': set(['1', '2'])
})
class TaggableManagerDirectTestCase(TaggableManagerTestCase):
food_model = DirectFood
pet_model = DirectPet
housepet_model = DirectHousePet
taggeditem_model = TaggedFood
class TaggableManagerCustomPKTestCase(TaggableManagerTestCase):
food_model = CustomPKFood
pet_model = CustomPKPet
housepet_model = CustomPKHousePet
taggeditem_model = TaggedCustomPKFood
def test_require_pk(self):
# TODO with a charfield pk, pk is never None, so taggit has no way to
# tell if the instance is saved or not
pass
class TaggableManagerOfficialTestCase(TaggableManagerTestCase):
food_model = OfficialFood
pet_model = OfficialPet
housepet_model = OfficialHousePet
taggeditem_model = OfficialThroughModel
tag_model = OfficialTag
def test_extra_fields(self):
self.tag_model.objects.create(name="red")
self.tag_model.objects.create(name="delicious", official=True)
apple = self.food_model.objects.create(name="apple")
apple.tags.add("delicious", "red")
pear = self.food_model.objects.create(name="Pear")
pear.tags.add("delicious")
self.assertEqual(apple, self.food_model.objects.get(tags__official=False))
class TaggableManagerInitializationTestCase(TaggableManagerTestCase):
"""Make sure manager override defaults and sets correctly."""
food_model = Food
custom_manager_model = CustomManager
def test_default_manager(self):
self.assertEqual(self.food_model.tags.__class__, _TaggableManager)
def test_custom_manager(self):
self.assertEqual(self.custom_manager_model.tags.__class__, CustomManager.Foo)
class TaggableFormTestCase(BaseTaggingTestCase):
form_class = FoodForm
food_model = Food
def test_form(self):
self.assertEqual(list(self.form_class.base_fields), ['name', 'tags'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy'})
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy'])
f = self.form_class({'name': 'apple', 'tags': 'green, red, yummy, delicious'}, instance=apple)
f.save()
apple = self.food_model.objects.get(name='apple')
self.assert_tags_equal(apple.tags.all(), ['green', 'red', 'yummy', 'delicious'])
self.assertEqual(self.food_model.objects.count(), 1)
f = self.form_class({"name": "raspberry"})
self.assertFalse(f.is_valid())
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value="delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has,comma')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
apple.tags.add('has space')
f = self.form_class(instance=apple)
self.assert_form_renders(f, """<tr><th><label for="id_name">Name:</label></th><td><input id="id_name" type="text" name="name" value="apple" maxlength="50" /></td></tr>
<tr><th><label for="id_tags">Tags:</label></th><td><input type="text" name="tags" value=""has space", "has,comma", delicious, green, red, yummy" id="id_tags" /><br />%(help_start)sA comma-separated list of tags.%(help_stop)s</td></tr>""")
def test_formfield(self):
tm = TaggableManager(verbose_name='categories', help_text='Add some categories', blank=True)
ff = tm.formfield()
self.assertEqual(ff.label, 'Categories')
self.assertEqual(ff.help_text, 'Add some categories')
self.assertEqual(ff.required, False)
self.assertEqual(ff.clean(""), [])
tm = TaggableManager()
ff = tm.formfield()
self.assertRaises(ValidationError, ff.clean, "")
class TaggableFormDirectTestCase(TaggableFormTestCase):
form_class = DirectFoodForm
food_model = DirectFood
class TaggableFormCustomPKTestCase(TaggableFormTestCase):
form_class = CustomPKFoodForm
food_model = CustomPKFood
class TaggableFormOfficialTestCase(TaggableFormTestCase):
form_class = OfficialFoodForm
food_model = OfficialFood
class TagStringParseTestCase(UnitTestCase):
"""
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
def test_with_simple_space_delimited_tags(self):
"""
Test with simple space-delimited tags.
"""
self.assertEqual(parse_tags('one'), ['one'])
self.assertEqual(parse_tags('one two'), ['one', 'two'])
self.assertEqual(parse_tags('one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('one one two two'), ['one', 'two'])
def test_with_comma_delimited_multiple_words(self):
"""
Test with comma-delimited multiple words.
An unquoted comma in the input will trigger this.
"""
self.assertEqual(parse_tags(',one'), ['one'])
self.assertEqual(parse_tags(',one two'), ['one two'])
self.assertEqual(parse_tags(',one two three'), ['one two three'])
self.assertEqual(parse_tags('a-one, a-two and a-three'),
['a-one', 'a-two and a-three'])
def test_with_double_quoted_multiple_words(self):
"""
Test with double-quoted multiple words.
A completed quote will trigger this. Unclosed quotes are ignored.
"""
self.assertEqual(parse_tags('"one'), ['one'])
self.assertEqual(parse_tags('"one two'), ['one', 'two'])
self.assertEqual(parse_tags('"one two three'), ['one', 'three', 'two'])
self.assertEqual(parse_tags('"one two"'), ['one two'])
self.assertEqual(parse_tags('a-one "a-two and a-three"'),
['a-one', 'a-two and a-three'])
def test_with_no_loose_commas(self):
"""
Test with no loose commas -- split on spaces.
"""
self.assertEqual(parse_tags('one two "thr,ee"'), ['one', 'thr,ee', 'two'])
def test_with_loose_commas(self):
"""
Loose commas - split on commas
"""
self.assertEqual(parse_tags('"one", two three'), ['one', 'two three'])
def test_tags_with_double_quotes_can_contain_commas(self):
"""
Double quotes can contain commas
"""
self.assertEqual(parse_tags('a-one "a-two, and a-three"'),
['a-one', 'a-two, and a-three'])
self.assertEqual(parse_tags('"two", one, one, two, "one"'),
['one', 'two'])
def test_with_naughty_input(self):
"""
Test with naughty input.
"""
# Bad users! Naughty users!
self.assertEqual(parse_tags(None), [])
self.assertEqual(parse_tags(''), [])
self.assertEqual(parse_tags('"'), [])
self.assertEqual(parse_tags('""'), [])
self.assertEqual(parse_tags('"' * 7), [])
self.assertEqual(parse_tags(',,,,,,'), [])
self.assertEqual(parse_tags('",",",",",",","'), [','])
self.assertEqual(parse_tags('a-one "a-two" and "a-three'),
['a-one', 'a-three', 'a-two', 'and'])
def test_recreation_of_tag_list_string_representations(self):
plain = Tag.objects.create(name='plain')
spaces = Tag.objects.create(name='spa ces')
comma = Tag.objects.create(name='com,ma')
self.assertEqual(edit_string_for_tags([plain]), 'plain')
self.assertEqual(edit_string_for_tags([plain, spaces]), '"spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, spaces, comma]), '"com,ma", "spa ces", plain')
self.assertEqual(edit_string_for_tags([plain, comma]), '"com,ma", plain')
self.assertEqual(edit_string_for_tags([comma, spaces]), '"com,ma", "spa ces"')
@skipIf(django.VERSION < (1, 7), "not relevant for Django < 1.7")
class DeconstructTestCase(UnitTestCase):
def test_deconstruct_kwargs_kept(self):
instance = TaggableManager(through=OfficialThroughModel, to='dummy.To')
name, path, args, kwargs = instance.deconstruct()
new_instance = TaggableManager(*args, **kwargs)
self.assertEqual('tests.OfficialThroughModel', new_instance.rel.through)
self.assertEqual('dummy.To', new_instance.rel.to)
@skipUnless(django.VERSION < (1, 7), "test only applies to 1.6 and below")
class SouthSupportTests(TestCase):
def test_import_migrations_module(self):
try:
from taggit.migrations import __doc__ # noqa
except ImproperlyConfigured as e:
exception = e
self.assertIn("SOUTH_MIGRATION_MODULES", exception.args[0])
class InheritedPrefetchTests(TestCase):
def test_inherited_tags_with_prefetch(self):
child = Child()
child.save()
child.tags.add('tag 1', 'tag 2', 'tag 3', 'tag 4')
child = Child.objects.get()
no_prefetch_tags = child.tags.all()
self.assertEquals(4, no_prefetch_tags.count())
child = Child.objects.prefetch_related('tags').get()
prefetch_tags = child.tags.all()
self.assertEquals(4, prefetch_tags.count())
self.assertEquals(set([t.name for t in no_prefetch_tags]),
set([t.name for t in prefetch_tags]))
|
makedir
|
return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files like e.g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
|
"""
merged implementation of the cache provider
the name cache was deliberately not chosen, to ensure pluggy automatically
ignores the external pytest-cache plugin
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import py
import six
import attr
import pytest
import json
import shutil
from . import paths
from .compat import _PY2 as PY2, Path
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
shutil.rmtree(str(cachedir))
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return paths.resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import PytestWarning
_issue_config_warning(
PytestWarning(fmt.format(**args) if args else fmt), self._config
)
# MASKED: makedir function (lines 58-73)
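    # Hypothetical sketch of the masked method, inferred only from its docstring
    # (the name must not contain "/", the directory is created on demand, and a
    # path object is returned); the "d" sub-directory and the py.path.local return
    # type are assumptions, not taken from the original source:
    #
    #     def makedir(self, name):
    #         name = Path(name)
    #         if len(name.parts) != 1:
    #             raise ValueError("name is not allowed to contain path separators")
    #         res = self._cachedir.joinpath("d", name)
    #         res.mkdir(exist_ok=True, parents=True)
    #         return py.path.local(res)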
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme()
def _ensure_readme(self):
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, " "will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
|
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
| 58 | 73 |
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import py
import six
import attr
import pytest
import json
import shutil
from . import paths
from .compat import _PY2 as PY2, Path
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
shutil.rmtree(str(cachedir))
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return paths.resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import PytestWarning
_issue_config_warning(
PytestWarning(fmt.format(**args) if args else fmt), self._config
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme()
def _ensure_readme(self):
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, " "will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
|
set
|
save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
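A minimal round-trip sketch (the test name, key, and stored data are hypothetical; it assumes the ``cache`` fixture defined later in this module):
def test_records_runs(cache):
    # Keys are ``/`` separated; the "myplugin/" prefix avoids clashes with other users.
    runs = cache.get("myplugin/runs", [])
    runs.append({"run": len(runs), "passed": True})
    # Only JSON-serialisable combinations of basic types can be stored.
    cache.set("myplugin/runs", runs)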
|
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import py
import six
import attr
import pytest
import json
import shutil
from . import paths
from .compat import _PY2 as PY2, Path
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
shutil.rmtree(str(cachedir))
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return paths.resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import PytestWarning
_issue_config_warning(
PytestWarning(fmt.format(**args) if args else fmt), self._config
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
# MASKED: set function (lines 96-118)
def _ensure_readme(self):
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, " "will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
|
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme()
| 96 | 118 |
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import py
import six
import attr
import pytest
import json
import shutil
from . import paths
from .compat import _PY2 as PY2, Path
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
shutil.rmtree(str(cachedir))
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return paths.resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import PytestWarning
_issue_config_warning(
PytestWarning(fmt.format(**args) if args else fmt), self._config
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_readme()
def _ensure_readme(self):
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, " "will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
|
_compute_delta
|
Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
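Equivalently, delta = min(1, min over moment orders lambda of exp(log_moment_lambda - lambda * eps)). A small worked check with made-up numbers (assumes ``_compute_delta`` as defined in this module):
import math
# a single illustrative (moment_order, log_moment) pair and a target eps
log_moments = [(2, 1.0)]
eps = 1.0
# the order-2 term contributes exp(1.0 - 2 * 1.0) = exp(-1), about 0.368
assert math.isclose(_compute_delta(log_moments, eps), math.exp(-1.0))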
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # scipy.special.binom is used in compute_a below
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
# MASKED: _compute_delta function (lines 232-252)
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
|
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
| 232 | 252 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # scipy.special.binom is used in compute_a below
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
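# Minimal usage sketch (not part of the original module; the parameter values
# below are illustrative assumptions): accumulate log moments over a range of
# orders for a single (q, sigma, T) setting, then convert them to (eps, delta),
# mirroring the example in the module docstring.
if __name__ == "__main__":
  q, sigma, T = 0.01, 4.0, 10000
  max_lmbd = 32
  log_moments = []
  for lmbd in range(1, max_lmbd + 1):
    log_moments.append((lmbd, compute_log_moment(q, sigma, T, lmbd)))
  eps, delta = get_privacy_spent(log_moments, target_delta=1e-5)
  print("eps = {} at delta = {}".format(eps, delta))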
|
_compute_eps
|
Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # compute_a uses scipy.special.binom; import it explicitly
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
# MASKED: _compute_eps function (lines 255-273)
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
|
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
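# Hypothetical worked example (numbers invented for illustration): with
# log_moments = [(1, 0.5), (2, 0.8)] and delta = 1e-5, log(delta) ~ -11.51, so
# the candidate epsilons are (0.5 + 11.51) / 1 ~ 12.01 and (0.8 + 11.51) / 2
# ~ 6.16; _compute_eps returns the smaller value, about 6.16.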
| 255 | 273 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # compute_a uses scipy.special.binom; import it explicitly
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
|
compute_log_moment
|
Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # compute_a uses scipy.special.binom; import it explicitly
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
# MASKED: compute_log_moment function (lines 276-302)
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
|
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
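# Usage sketch (parameter values are illustrative assumptions, not from the
# source): with verify=True the closed-form moment from compute_a is checked
# against the high-precision numerical integrals compute_a_mp / compute_b_mp
# before the log moment is returned.
#
#   log_moment = compute_log_moment(q=0.01, sigma=4.0, steps=10000, lmbd=8,
#                                   verify=True)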
| 276 | 302 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # compute_a uses scipy.special.binom; import it explicitly
import scipy.stats
#from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
|
pacf_ols
|
Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag + 1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
        mod_instance = mod(endog, exog[:, :lag], *modargs)
        results[lag] = mod_instance.fit(*fitargs)
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
            icbest = np.abs(results[lag].tvalues[-1])
            if icbest >= stop:
                bestlag = lag
                break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
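# Usage sketch (illustrative; mirrors the call made inside adfuller below):
# select the AIC-minimizing number of lagged differences, where fullRHS holds
# the level, any trend terms and up to maxlag lagged differences.
#
#   icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag, maxlag, 'aic')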
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
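# Usage sketch (illustrative, not part of the original module): a pure random
# walk should typically fail to reject the unit-root null, while its first
# difference should reject it.
#
#   rw = np.cumsum(np.random.standard_normal(500))
#   adfuller(rw)[1]            # large p-value: cannot reject a unit root
#   adfuller(np.diff(rw))[1]   # small p-value: reject the unit root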
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
        If True, then the denominator is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
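# Usage sketch (illustrative): for series containing NaNs, missing='drop'
# removes them before computing the autocovariance, while missing='conservative'
# zero-fills them and rescales the denominator accordingly.
#
#   acovf(x, unbiased=True, fft=True, missing='drop')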
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic
    Parameters
    ----------
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
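# Usage sketch (illustrative): request 95% confidence bands and Ljung-Box
# statistics in one call; with both alpha and qstat given, four arrays are
# returned.
#
#   r, confint, q, p = acf(x, nlags=20, qstat=True, alpha=.05)
#   # r[0] is always 1.0; p[k] is the Ljung-Box p-value at lag k + 1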
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
    This solves yule_walker for each desired lag and currently contains
    duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
# MASKED: pacf_ols function (lines 525-557)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ['ld', 'ldu', 'ldunbiase', 'ldunbiased', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
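# Usage sketch (illustrative only): the different `method` options should give
# similar partial autocorrelations for a well-behaved series.  The seed and
# series length below are arbitrary.
#
#   rng = np.random.RandomState(0)
#   y = rng.standard_normal(300)
#   p_yw = pacf(y, nlags=10, method='ywunbiased')
#   p_ols = pacf(y, nlags=10, method='ols')
#   p_ld = pacf(y, nlags=10, method='ldbiased')
#   # each result has 11 entries (lag 0 == 1.0) and they agree closely here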
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominator is n-k, otherwise n
    demean : boolean
        if True, subtract the sample means of x and y before computing
    Returns
    -------
    ccovf : array
        cross-covariance function of x and y
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
        if True, then the denominator for the cross-covariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the cross-covariance is adjusted
    but the cross-correlation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
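# Usage sketch (illustrative only): the cross-correlation of a delayed copy
# of a series with the original peaks at the delay.  The delay of 3 samples
# and the zero padding are arbitrary illustration choices.
#
#   rng = np.random.RandomState(1)
#   x = rng.standard_normal(400)
#   x_delayed = np.r_[np.zeros(3), x[:-3]]       # x_delayed[t] == x[t - 3]
#   r = ccf(x_delayed, x, unbiased=False)
#   # np.argmax(r) should be 3: ccf(a, b)[k] estimates corr(a[t + k], b[t])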
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
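# Usage sketch (illustrative only): for a pure sinusoid the periodogram peaks
# at the corresponding Fourier frequency bin.  The frequency (10 cycles over
# 256 samples) is an arbitrary choice.
#
#   t = np.arange(256)
#   sig = np.sin(2 * np.pi * 10 * t / 256)
#   pgram = periodogram(sig)
#   # np.argmax(pgram) == 10 (the mirrored peak sits at bin 256 - 10 = 246)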
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
        If isacov is False, then this is the time series. If isacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
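# Usage sketch (illustrative only): feeding the exact autocovariances of an
# AR(1) process with coefficient 0.6 and unit innovation variance recovers
# the coefficient, the innovation variance, and a PACF that cuts off after
# lag 1.  The parameter values are arbitrary.
#
#   phi_true, s2 = 0.6, 1.0
#   acov = s2 / (1 - phi_true ** 2) * phi_true ** np.arange(4)
#   sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(acov, nlags=3,
#                                                       isacov=True)
#   # arcoefs ~ [0.6, 0.0, 0.0], sigma_v ~ 1.0, pacf_ ~ [1.0, 0.6, 0.0, 0.0]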
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
        maxlag
    addconst : bool
        include a constant in the model (default True); addconst=False is not
        implemented
    verbose : bool
        print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
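# Usage sketch (illustrative only): builds a bivariate series where the
# second column drives the first with one lag, so the non-causality null
# should be rejected.  The coefficients, noise scale and sample size are
# arbitrary.
#
#   rng = np.random.RandomState(2)
#   n = 500
#   cause = rng.standard_normal(n)
#   effect = np.zeros(n)
#   for t in range(1, n):
#       effect[t] = 0.5 * cause[t - 1] + 0.1 * rng.standard_normal()
#   gc = grangercausalitytests(np.column_stack([effect, cause]), maxlag=2,
#                              verbose=False)
#   # gc[1][0]['ssr_ftest'][1] is the p-value at one lag; it should be tiny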
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
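# Usage sketch (illustrative only): two series sharing one random-walk trend
# are cointegrated, so the no-cointegration null should be rejected.  Seed,
# length and noise scale are arbitrary.
#
#   rng = np.random.RandomState(3)
#   trend_rw = rng.standard_normal(500).cumsum()        # common I(1) trend
#   y_a = trend_rw + rng.standard_normal(500)
#   tstat, pvalue, crit = coint(y_a, trend_rw)
#   # pvalue is typically very small; crit holds the 1%/5%/10% critical values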
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
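# Usage sketch (illustrative only): white noise is level stationary, a random
# walk is not, so the KPSS p-values should land near the opposite ends of the
# tabulated (0.01, 0.1) range (boundary warnings may be emitted).  Seed and
# length are arbitrary.
#
#   rng = np.random.RandomState(4)
#   noise = rng.standard_normal(500)
#   stat_n, p_n, lags_n, crit_n = kpss(noise, regression='c')            # p_n ~ 0.1
#   stat_w, p_w, lags_w, crit_w = kpss(noise.cumsum(), regression='c')   # p_w ~ 0.01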
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
        Number of lags for which pacf is returned (lag 0 is always included
        and equals 1).
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements (lag 0 is 1.0)
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
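# Usage sketch (illustrative only): pacf_ols and pacf_yw should agree closely
# on a simple series; both return nlags + 1 values with lag 0 equal to 1.
# Seed and length are arbitrary.
#
#   rng = np.random.RandomState(5)
#   y = rng.standard_normal(300)
#   p_ols = pacf_ols(y, nlags=5)       # p_ols[0] == 1.0
#   p_yw = pacf_yw(y, nlags=5)         # similar values at lags 1..5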
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length chosen by the information
    criterion (smallest AIC/BIC, or the t-stat rule).
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
        The lag length that minimizes the information criterion (or, for
        't-stat', the largest lag with a significant last t-statistic).
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
            icbest = np.abs(results[lag].tvalues[-1])
            if icbest >= stop:
                # keep the largest lag whose last t-statistic is significant
                bestlag = lag
                break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
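# Usage sketch (illustrative only): `_autolag` is normally driven by
# `adfuller` (where the regressors also include the lagged level), but it can
# be called directly with a lag matrix.  The data, seed and lag counts below
# are arbitrary; `lagmat`, `add_constant` and `OLS` are the imports already
# used in this module.
#
#   rng = np.random.RandomState(6)
#   y = rng.standard_normal(200)
#   exog = add_constant(lagmat(y, 6, trim='both'), prepend=True)  # const + 6 lags
#   endog = y[6:]
#   icbest, bestlag = _autolag(OLS, endog, exog, startlag=1, maxlag=6,
#                              method='aic')
#   # bestlag counts columns, so the chosen number of lags is bestlag - 1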
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
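# Usage sketch (illustrative only): a random walk should not reject the
# unit-root null, while white noise should.  Seed and length are arbitrary.
#
#   rng = np.random.RandomState(7)
#   noise = rng.standard_normal(500)
#   walk = noise.cumsum()
#   stat_w, p_w, usedlag_w, nobs_w, crit_w, icbest_w = adfuller(walk)
#   # p_w is large: cannot reject a unit root for the random walk
#   stat_n, p_n = adfuller(noise)[:2]
#   # p_n is tiny: the unit root is rejected for white noise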
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
        If True, then the denominator is n-k, otherwise n
    demean : bool
        If True, then subtract the mean of x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
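# Usage sketch (illustrative only): with the default biased denominator the
# lag-0 autocovariance equals the (ddof=0) sample variance, and the FFT and
# direct paths agree.  Seed and length are arbitrary.
#
#   rng = np.random.RandomState(8)
#   y = rng.standard_normal(1000)
#   g_direct = acovf(y, unbiased=False, fft=False)
#   g_fft = acovf(y, unbiased=False, fft=True)
#   # np.allclose(g_direct, g_fft) and np.isclose(g_direct[0], y.var()) hold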
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic
    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients.  Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (i.e., not just the
        length of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
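# Usage sketch (illustrative only): the Ljung-Box statistic is computed from
# autocorrelations with lag 0 dropped, which is also how `acf(..., qstat=True)`
# uses it internally.  Seed and length are arbitrary.
#
#   rng = np.random.RandomState(9)
#   y = rng.standard_normal(500)
#   r = acf(y, nlags=10)
#   q, p = q_stat(r[1:], nobs=len(y))
#   # for white noise the p-values should all be comfortably above 0.05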
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
    nlags : int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
        Bartlett's formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements (including lag 0)
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
        Number of lags for which pacf is returned (lag 0 is always included
        and equals 1).
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements (lag 0 is 1.0)
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
    method : {'ywunbiased', 'ywmle', 'ols', 'ldunbiased', 'ldbiased'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ['ld', 'ldu', 'ldunbiase', 'ldunbiased', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominator is n-k, otherwise n
    demean : boolean
        if True, subtract the sample means of x and y before computing
    Returns
    -------
    ccovf : array
        cross-covariance function of x and y
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
        if True, then the denominator for the cross-covariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the cross-covariance is adjusted
    but the cross-correlation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
        If isacov is False, then this is the time series. If isacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
        maxlag
    addconst : bool
        include a constant in the model (default True); addconst=False is not
        implemented
    verbose : bool
        print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
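# --- Illustrative sketch (not part of the original module) -------------------
# _sigma_est_kpss above applies Bartlett-kernel weights w_i = 1 - i / (lags + 1)
# to the lag-i autocovariance terms, which is the Newey-West style down-weighting
# of higher lags. The helper name below is hypothetical and only exposes those
# weights for inspection.
def _demo_kpss_bartlett_weights(lags=4):
    # weights applied to lags 1..lags in the long-run variance estimate
    return [1. - (i / (lags + 1.)) for i in range(1, lags + 1)]
# Example: _demo_kpss_bartlett_weights(4) -> [0.8, 0.6, 0.4, 0.2]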
|
ccovf
|
crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
   if True, then the denominator is n-k, otherwise n
Returns
-------
ccovf : array
   cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
        raise ValueError("Information Criterion %s not understood." % method)
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
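# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage of adfuller: a simulated random walk should typically fail to
# reject the unit-root null, while its first difference should typically reject
# it. The function name and seed are hypothetical; exact p-values will vary.
def _demo_adfuller_usage():
    np.random.seed(12345)
    rw = np.cumsum(np.random.standard_normal(500))      # I(1) series
    stat_rw, pval_rw = adfuller(rw, autolag='AIC')[:2]
    stat_d, pval_d = adfuller(np.diff(rw), autolag='AIC')[:2]
    # pval_rw is usually large (cannot reject a unit root); pval_d is usually
    # very small (the differenced series looks stationary).
    return pval_rw, pval_d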
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
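# --- Illustrative sketch (not part of the original module) -------------------
# The FFT and direct code paths of acovf compute the same quantity; this
# hypothetical check compares the leading lags on simulated data.
def _demo_acovf_fft_agreement():
    np.random.seed(0)
    x = np.random.standard_normal(200)
    direct = acovf(x, unbiased=False, fft=False)
    via_fft = acovf(x, unbiased=False, fft=True)
    return np.allclose(direct[:10], via_fft[:10])       # expected to be True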
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns Ljung-Box Q Statistic
    Parameters
    ----------
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
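# --- Illustrative sketch (not part of the original module) -------------------
# q_stat expects autocorrelations without the lag-0 term plus the sample size,
# which is exactly how acf calls it when qstat=True. Names below are
# illustrative only.
def _demo_q_stat_usage():
    np.random.seed(1)
    x = np.random.standard_normal(300)
    r = acf(x, nlags=10)                      # includes lag 0
    qstats, pvals = q_stat(r[1:], nobs=len(x))
    # for white noise the p-values should typically be far from zero
    return qstats[-1], pvals[-1]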
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
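# --- Illustrative sketch (not part of the original module) -------------------
# With alpha and qstat both set, acf returns the autocorrelations, Bartlett
# confidence bands, Ljung-Box statistics and their p-values, in that order.
# The demo name and seed below are hypothetical.
def _demo_acf_full_output():
    np.random.seed(2)
    x = np.random.standard_normal(400)
    r, confint, qstats, pvals = acf(x, nlags=20, alpha=.05, qstat=True)
    # expected under these assumptions: (21,), (21, 2), (20,), (20,)
    return r.shape, confint.shape, qstats.shape, pvals.shape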
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
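# --- Illustrative sketch (not part of the original module) -------------------
# The Yule-Walker and OLS based partial autocorrelations usually agree closely
# for a well-behaved series; this hypothetical check compares them.
def _demo_pacf_methods_agree():
    np.random.seed(3)
    x = np.random.standard_normal(500)
    p_yw = pacf(x, nlags=10, method='ywunbiased')
    p_ols = pacf(x, nlags=10, method='ols')
    return np.max(np.abs(p_yw - p_ols))       # typically a small number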
# MASKED: ccovf function (lines 626-658)
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
        if True, then the denominator for the cross-covariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the cross-covariance is adjusted
    but the cross-correlation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
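# --- Illustrative sketch (not part of the original module) -------------------
# ccf(x, y)[k] relates x at time t+k to y at time t, so if the second series
# leads the first by two periods the cross-correlation should typically peak
# near lag 2. All names and the seed below are hypothetical.
def _demo_ccf_lead_lag():
    np.random.seed(4)
    base = np.random.standard_normal(502)
    leader = base[2:]                                          # "driver" series
    follower = base[:-2] + 0.1 * np.random.standard_normal(500)  # lags the driver by 2
    r = ccf(follower, leader, unbiased=False)
    return int(np.argmax(r[:10]))                              # typically 2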
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
        If isacov is False, then this is the time series. If isacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
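# --- Illustrative sketch (not part of the original module) -------------------
# For a simulated AR(2) process the Levinson-Durbin recursion should typically
# recover AR coefficients close to the ones used to generate the data. The
# function name, seed and coefficients below are hypothetical.
def _demo_levinson_durbin_ar2():
    np.random.seed(5)
    e = np.random.standard_normal(2000)
    x = np.zeros_like(e)
    for t in range(2, len(e)):                 # x_t = .6 x_{t-1} - .3 x_{t-2} + e_t
        x[t] = .6 * x[t - 1] - .3 * x[t - 2] + e[t]
    sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(x, nlags=2, isacov=False)
    return arcoefs                             # typically close to [.6, -.3]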
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
        data for testing whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
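# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical example where the second column Granger-causes the first:
# x1 depends on lagged x2, so the test p-values should typically be very small.
def _demo_granger_usage():
    np.random.seed(6)
    n = 500
    x2 = np.random.standard_normal(n)
    x1 = np.zeros(n)
    for t in range(1, n):                      # x1_t driven by x2_{t-1}
        x1[t] = .5 * x1[t - 1] + .8 * x2[t - 1] + .1 * np.random.standard_normal()
    res = grangercausalitytests(np.column_stack([x1, x2]), maxlag=2, verbose=False)
    return res[1][0]['ssr_ftest'][1]           # p-value at lag 1, typically tiny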
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
    hypothesis is that there is a cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
        warnings.warn("y0 and y1 are (almost) perfectly collinear. "
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
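# --- Illustrative sketch (not part of the original module) -------------------
# Two random walks sharing the same stochastic trend are cointegrated, so the
# Engle-Granger test below typically yields a small p-value; an independent
# pair of random walks typically would not. Names and seed are hypothetical.
def _demo_coint_usage():
    np.random.seed(7)
    trend_walk = np.cumsum(np.random.standard_normal(500))
    y0 = trend_walk + np.random.standard_normal(500)         # shares the trend
    y1 = 2. * trend_walk + np.random.standard_normal(500)
    tstat, pval, crit = coint(y0, y1, trend='c')
    return pval                                               # typically well below .05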
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can,
    therefore, be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
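# --- Illustrative sketch (not part of the original module) -------------------
# kpss reverses the usual roles: the null hypothesis is stationarity. White
# noise should typically give a large (boundary) p-value, while a random walk
# should typically give a small one; InterpolationWarning messages at the
# table boundaries are expected. Names and seed below are hypothetical.
def _demo_kpss_usage():
    np.random.seed(8)
    noise = np.random.standard_normal(500)
    walk = np.cumsum(noise)
    stat_noise, p_noise, lags_noise, crit_noise = kpss(noise, regression='c')
    stat_walk, p_walk, lags_walk, crit_walk = kpss(walk, regression='c')
    return p_noise, p_walk          # typically p_noise near 0.1, p_walk near 0.01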
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
|
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
       if True, then the denominator is n-k, otherwise n
Returns
-------
ccovf : array
       cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
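# --- Illustrative sketch (not part of the original module) -------------------
# At lag 0 the biased cross-covariance should match the mean of the product of
# the demeaned series; a quick hypothetical consistency check:
def _demo_ccovf_lag0():
    np.random.seed(9)
    x = np.random.standard_normal(200)
    y = np.random.standard_normal(200)
    c = ccovf(x, y, unbiased=False)
    direct = np.mean((x - x.mean()) * (y - y.mean()))
    return np.allclose(c[0], direct)            # expected to be True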
| 626 | 658 |
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
        raise ValueError("Information Criterion %s not understood." % method)
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns Ljung-Box Q Statistic
    Parameters
    ----------
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
        Number of lags for which pacf is returned. Lag 0 is included.
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements (including lag 0)
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
       if True, then the denominators are n-k, otherwise n
    Returns
    -------
    ccovf : array
        cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
       if True, then the denominators for the cross-covariance are n-k,
       otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the cross-covariance is adjusted
    but the cross-correlation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
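# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated series are made up): the periodogram of a pure sinusoid
# should peak at the FFT bin closest to the sinusoid's frequency.
def _example_periodogram():
    import numpy as np
    t = np.arange(256)
    y = np.sin(2 * np.pi * t / 16.)  # period 16, i.e. frequency 1/16
    pgram = periodogram(y)
    # the largest ordinate is expected at bin 256/16 = 16 (mirrored at 240)
    return int(np.argmax(pgram))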
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
    s : array_like
        If isacov is False, then this is the time series. If isacov is True,
        then this is interpreted as the autocovariance starting with lag 0.
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
    addconst : bool
        include a constant in the model
    verbose : bool
        print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8, autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
    resstore : (optional) instance of ResultsStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
    # x must be 1d; otherwise x.size != nobs
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
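# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): KPSS should typically reject level
# stationarity for a random walk but not for white noise.
def _example_kpss():
    import numpy as np
    np.random.seed(12345)
    noise = np.random.randn(500)
    walk = noise.cumsum()
    stat_noise, p_noise, _, _ = kpss(noise, regression='c')
    stat_walk, p_walk, _, _ = kpss(walk, regression='c')
    # p_noise is usually near the 0.1 boundary (fail to reject), p_walk near 0.01
    return p_noise, p_walk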
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
|
periodogram
|
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length that optimizes the information
    criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
    icbest : float
        Best information criterion.
    bestlag : int
        The lag length selected by the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag + 1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
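# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): the ADF test should usually reject a
# unit root for white noise and fail to reject it for a random walk.
def _example_adfuller():
    import numpy as np
    np.random.seed(0)
    noise = np.random.randn(500)
    walk = noise.cumsum()
    adf_noise = adfuller(noise, regression='c', autolag='AIC')
    adf_walk = adfuller(walk, regression='c', autolag='AIC')
    # element [1] is the MacKinnon p-value: small for noise, large for the walk
    return adf_noise[1], adf_walk[1]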
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
    unbiased : bool
        If True, then the denominators are n-k, otherwise n
    demean : bool
        If True, then subtract the mean of x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
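# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): the direct and FFT-based computations
# of the autocovariance should agree to numerical precision.
def _example_acovf():
    import numpy as np
    np.random.seed(1)
    y = np.random.randn(1000)
    direct = acovf(y, fft=False)
    via_fft = acovf(y, fft=True)
    # direct[0] is (up to the biased denominator) the sample variance
    return np.allclose(direct, via_fft)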
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic
    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients. Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
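# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): feed the autocorrelations from acf()
# (excluding lag 0) into q_stat together with the sample size.
def _example_q_stat():
    import numpy as np
    np.random.seed(2)
    y = np.random.randn(300)
    r = acf(y, nlags=10)
    qstats, pvalues = q_stat(r[1:], nobs=len(y))  # drop lag 0
    # for white noise the p-values are typically well above 0.05
    return qstats, pvalues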
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
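# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated AR(1) series are made up): the sample ACF of an AR(1)
# process decays roughly geometrically in the AR coefficient.
def _example_acf():
    import numpy as np
    np.random.seed(3)
    n, phi = 2000, 0.7
    e = np.random.randn(n)
    y = np.zeros(n)
    for t in range(1, n):
        y[t] = phi * y[t - 1] + e[t]
    r = acf(y, nlags=5)
    # r[0] is exactly 1 and r[1] should be close to phi for a long sample
    return r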
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
        Number of lags for which pacf is returned. Lag 0 is included.
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements (including lag 0)
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
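# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated AR(1) series are made up): for an AR(1) process the PACF
# is large at lag 1 and close to zero at higher lags, for any of the methods.
def _example_pacf():
    import numpy as np
    np.random.seed(4)
    n, phi = 2000, 0.7
    e = np.random.randn(n)
    y = np.zeros(n)
    for t in range(1, n):
        y[t] = phi * y[t - 1] + e[t]
    by_yw = pacf(y, nlags=5, method='ywunbiased')
    by_ols = pacf(y, nlags=5, method='ols')
    # both estimates should have element [1] close to phi
    return by_yw, by_ols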
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
       if True, then the denominators are n-k, otherwise n
    Returns
    -------
    ccovf : array
        cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
       if True, then the denominators for the cross-covariance are n-k,
       otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the cross-covariance is adjusted
    but the cross-correlation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
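# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): if y is a noisy copy of x delayed by
# three periods, the cross-correlation of (y, x) should peak at lag 3.
def _example_ccf():
    import numpy as np
    np.random.seed(5)
    x = np.random.randn(500)
    y = np.roll(x, 3) + 0.1 * np.random.randn(500)  # y lags x by 3
    r = ccf(y, x)
    # the largest value is typically found at index 3
    return int(np.argmax(r))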
# MASKED: periodogram function (lines 689-714)
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
    s : array_like
        If isacov is False, then this is the time series. If isacov is True,
        then this is interpreted as the autocovariance starting with lag 0.
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
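# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated AR(1) series are made up): recover the AR coefficient of
# a simulated AR(1) process via the Levinson-Durbin recursion.
def _example_levinson_durbin():
    import numpy as np
    np.random.seed(6)
    n, phi = 5000, 0.6
    e = np.random.randn(n)
    y = np.zeros(n)
    for t in range(1, n):
        y[t] = phi * y[t - 1] + e[t]
    sigma_v, arcoefs, pacf_, sig, phi_mat = levinson_durbin(y, nlags=3,
                                                            isacov=False)
    # arcoefs[0] should be close to 0.6, higher-order terms close to 0
    return arcoefs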
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
    addconst : bool
        include a constant in the model
    verbose : bool
        print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
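# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): the second column is constructed to
# drive the first, so the tests should typically reject non-causality.
def _example_grangercausalitytests():
    import numpy as np
    np.random.seed(7)
    n = 500
    x2 = np.random.randn(n)
    x1 = np.zeros(n)
    for t in range(1, n):
        x1[t] = 0.5 * x1[t - 1] + 0.8 * x2[t - 1] + 0.1 * np.random.randn()
    data = np.column_stack([x1, x2])
    # keys of the result are the lag lengths; each value holds the four tests
    return grangercausalitytests(data, maxlag=2, verbose=False)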
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8, autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
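# Illustrative usage sketch (not part of the original module; the helper name
# and the simulated data are made up): two series sharing the same random-walk
# component are cointegrated, so the p-value should typically be small.
def _example_coint():
    import numpy as np
    np.random.seed(8)
    walk = np.random.randn(500).cumsum()
    y0 = walk + 0.5 * np.random.randn(500)
    y1 = 2.0 * walk + 0.5 * np.random.randn(500)
    t_stat, pvalue, crit = coint(y0, y1, trend='c')
    return t_stat, pvalue, crit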
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
>>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
function computes the full exact MLE estimate of each model and can
therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
|
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
| 689 | 714 |
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
.. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
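# Illustrative usage sketch, not part of statsmodels: calling `adfuller` above
# on a synthetic random walk, where the unit-root null should not be rejected.
# The seed, sample size and the helper name `_adfuller_usage_sketch` are
# assumptions made only for this demo.
def _adfuller_usage_sketch():
    rng = np.random.RandomState(12345)
    y = rng.standard_normal(500).cumsum()   # random walk, so H0 (unit root) holds
    stat, pvalue, usedlag, nobs, crit, icbest = adfuller(y, autolag='AIC')
    # a large p-value is expected here, i.e. no evidence against a unit root
    return stat, pvalue, crit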
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
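# Illustrative usage sketch, not part of statsmodels: checking that the direct
# and FFT paths of `acovf` above agree on white noise. The seed, length and
# the helper name are assumptions made only for this demo.
def _acovf_usage_sketch():
    rng = np.random.RandomState(0)
    x = rng.standard_normal(256)
    direct = acovf(x, unbiased=False, fft=False)
    via_fft = acovf(x, unbiased=False, fft=True)
    assert np.allclose(direct, via_fft)
    return direct[:5]   # the lag-0 entry is the (biased) sample variance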
def q_stat(x, nobs, type="ljungbox"):
"""
Returns the Ljung-Box Q statistic
Parameters
----------
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
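# Illustrative usage sketch, not part of statsmodels: `q_stat` expects the
# autocorrelations with lag 0 dropped plus the original sample size. The data,
# lag count and helper name are assumptions made only for this demo.
def _q_stat_usage_sketch():
    rng = np.random.RandomState(1)
    x = rng.standard_normal(200)
    autocorr = acf(x, nlags=10)   # `acf` is defined later in this module
    qstats, pvalues = q_stat(autocorr[1:], nobs=len(x))
    return qstats[-1], pvalues[-1]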
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
Confidence intervals for the ACF. Returned if alpha is not None.
qstat : array, optional
The Ljung-Box Q-Statistic. Returned if qstat is True.
pvalues : array, optional
The p-values associated with the Q-statistics. Returned if qstat is
True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
This is based on np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
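# Illustrative usage sketch, not part of statsmodels: `acf` with confidence
# bands and Ljung-Box statistics on an AR(1)-like series. The seed, sample
# size, the 0.5 coefficient and the helper name are assumptions for the demo.
def _acf_usage_sketch():
    rng = np.random.RandomState(2)
    n = 300
    e = rng.standard_normal(n)
    x = np.zeros(n)
    for t in range(1, n):
        x[t] = 0.5 * x[t - 1] + e[t]
    values, confint, qstats, pvalues = acf(x, nlags=20, qstat=True, alpha=0.05)
    return values[1], confint[1], pvalues[0]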
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is included as the first element.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
partial autocorrelations, nlags + 1 elements, including lag zero
confint : array, optional
Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiased', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
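# Illustrative usage sketch, not part of statsmodels: comparing the
# Yule-Walker and OLS flavours of `pacf` on white noise, where all partial
# autocorrelations beyond lag 0 should be close to zero. Seed, lag count and
# helper name are assumptions for the demo.
def _pacf_usage_sketch():
    rng = np.random.RandomState(3)
    x = rng.standard_normal(400)
    by_yw = pacf(x, nlags=10, method='ywunbiased')
    by_ols = pacf(x, nlags=10, method='ols')
    return by_yw[:3], by_ols[:3]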
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators is n-k, otherwise n
Returns
-------
ccovf : array
autocovariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators for autocovariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
This is based on np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
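# Illustrative usage sketch, not part of statsmodels: `ccf` between a series
# and a copy of itself shifted by three observations, so the largest
# cross-correlation should appear at lag 3. Seed, shift and helper name are
# assumptions for the demo.
def _ccf_usage_sketch():
    rng = np.random.RandomState(4)
    x = rng.standard_normal(500)
    y = np.roll(x, 3)               # y[t] equals x[t - 3] (up to wrap-around)
    cc = ccf(y, x, unbiased=False)
    return int(np.argmax(cc))       # expected to be 3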
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
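# Illustrative usage sketch, not part of statsmodels: the raw periodogram of a
# pure sinusoid concentrates its energy at the generating frequency. The
# choice of 10 cycles over 200 points and the helper name are assumptions for
# the demo.
def _periodogram_usage_sketch():
    n = 200
    t = np.arange(n)
    x = np.sin(2 * np.pi * 10 * t / n)
    pgram = periodogram(x)
    # the largest ordinate over the positive frequencies sits at index 10
    return int(np.argmax(pgram[:n // 2]))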
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If isacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
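# Illustrative usage sketch, not part of statsmodels: recovering an AR(1)
# coefficient with `levinson_durbin` straight from the data (isacov=False).
# The 0.6 coefficient, seed, sample size and helper name are assumptions for
# the demo.
def _levinson_durbin_usage_sketch():
    rng = np.random.RandomState(5)
    n = 2000
    e = rng.standard_normal(n)
    x = np.zeros(n)
    for t in range(1, n):
        x[t] = 0.6 * x[t - 1] + e[t]
    sigma_v, arcoefs, pacf_vals, sig, phi = levinson_durbin(x, nlags=5,
                                                            isacov=False)
    return arcoefs[0]   # should be close to 0.6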
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
# Granger Causality test using ssr (ch2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
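# Illustrative usage sketch, not part of statsmodels: a two-column array in
# which the second column is a lagged copy of the first plus noise, so it
# should Granger cause column one at lag 1. Seed, noise scale and helper name
# are assumptions for the demo.
def _grangercausalitytests_usage_sketch():
    rng = np.random.RandomState(6)
    n = 300
    x2 = rng.standard_normal(n)
    x1 = np.zeros(n)
    x1[1:] = 0.8 * x2[:-1] + 0.1 * rng.standard_normal(n - 1)
    data = np.column_stack([x1, x2])
    res = grangercausalitytests(data, maxlag=2, verbose=False)
    # p-value of the ssr based F test at lag 1 (expected to be very small)
    return res[1][0]['ssr_ftest'][1]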
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
In 0.8 autolag was always None, now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
y0 : array_like, 1d
first element in cointegrating vector
y1 : array_like
remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economic
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
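# Illustrative usage sketch, not part of statsmodels: two I(1) series sharing
# the same stochastic trend, so `coint` should reject the null of no
# cointegration. Seed, length, noise scale and helper name are assumptions for
# the demo.
def _coint_usage_sketch():
    rng = np.random.RandomState(7)
    common_walk = rng.standard_normal(500).cumsum()
    y0 = common_walk + 0.5 * rng.standard_normal(500)
    y1 = 2.0 * common_walk + 0.5 * rng.standard_normal(500)
    t_stat, pvalue, crit = coint(y0, y1, trend='c')
    return t_stat, pvalue, crit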
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
>>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
function computes the full exact MLE estimate of each model and can
therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
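# Illustrative usage sketch, not part of statsmodels: a small order search on
# an ARMA(1, 1) sample. The grid is kept tiny (max_ar=2, max_ma=2) because
# every cell fits a full MLE model; the seed, sample size and helper name are
# assumptions for the demo.
def _arma_order_select_ic_usage_sketch():
    from statsmodels.tsa.arima_process import arma_generate_sample
    np.random.seed(8)
    ar = np.r_[1, -0.6]   # AR polynomial for an AR coefficient of 0.6
    ma = np.r_[1, 0.4]    # MA polynomial for an MA coefficient of 0.4
    y = arma_generate_sample(ar, ma, nsample=500)
    res = arma_order_select_ic(y, max_ar=2, max_ma=2, ic=['aic', 'bic'])
    return res.aic_min_order, res.bic_min_order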
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
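# Illustrative usage sketch, not part of statsmodels: `kpss` applied to a
# stationary series (white noise) and to a random walk, to contrast the two
# outcomes; boundary cases trigger the InterpolationWarning used above. Seed,
# sample size and helper name are assumptions for the demo.
def _kpss_usage_sketch():
    rng = np.random.RandomState(9)
    stationary = rng.standard_normal(500)
    random_walk = stationary.cumsum()
    stat_s, p_s, lags_s, crit_s = kpss(stationary, regression='c')
    stat_rw, p_rw, lags_rw, crit_rw = kpss(random_walk, regression='c')
    # expect a large p-value for the stationary series and the lower table
    # boundary (0.01) for the random walk
    return (stat_s, p_s), (stat_rw, p_rw)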
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
|
_sigma_est_kpss
|
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
|
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
.. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
def q_stat(x, nobs, type="ljungbox"):
"""
Returns the Ljung-Box Q statistic
Parameters
----------
x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
of the autocorrelation function results.
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
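# Illustrative usage sketch (not part of the original module): feed the
# autocorrelations from `acf` (lag 0 dropped) into `q_stat`, mirroring what
# `acf(..., qstat=True)` does internally. The seed is arbitrary.
def _example_q_stat():
    rng = np.random.RandomState(1)
    x = rng.standard_normal(300)
    r = acf(x, nlags=10, fft=True)
    q, p = q_stat(r[1:], nobs=len(x))  # drop lag 0 before passing in
    return q[-1], p[-1]  # joint Ljung-Box test up to lag 10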
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
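# Illustrative usage sketch (not part of the original module): `acf` with
# Bartlett confidence bands and Ljung-Box statistics for white noise. The
# returned confint is centered on the acf values themselves, so the band
# half-width is confint[:, 1] - r. Seed and lag count are arbitrary.
def _example_acf():
    rng = np.random.RandomState(2)
    x = rng.standard_normal(500)
    r, confint, q, p = acf(x, nlags=20, alpha=.05, qstat=True, fft=True)
    # r has nlags + 1 = 21 entries (lags 0..20); q and p have 20 entries
    return r, confint, q, p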
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    Notes
    -----
    This solves yule_walker for each desired lag and currently contains
    duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned. Lag 0 is included and is
        always 1.
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
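# Illustrative usage sketch (not part of the original module): for an AR(1)
# process the partial autocorrelation should be large at lag 1 and close to
# zero afterwards. The AR coefficient 0.6 and the seed are arbitrary choices.
def _example_pacf():
    rng = np.random.RandomState(3)
    e = rng.standard_normal(1000)
    x = np.zeros_like(e)
    for t in range(1, len(e)):
        x[t] = 0.6 * x[t - 1] + e[t]
    p = pacf(x, nlags=5, method='ywunbiased')
    return p  # p[1] should be near 0.6, p[2:] near 0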
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominators are n-k, otherwise n
    Returns
    -------
    ccovf : array
        cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominators for the autocovariance are n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
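# Illustrative usage sketch (not part of the original module): the
# cross-correlation of a series with a shifted copy of itself peaks at the
# imposed shift. The shift of 3 and the seed are arbitrary choices.
def _example_ccf():
    rng = np.random.RandomState(4)
    y = rng.standard_normal(400)
    x = np.roll(y, 3)  # x[t] equals y[t - 3] (up to wrap-around)
    r = ccf(x, y, unbiased=False)
    return int(np.argmax(r))  # expected to be 3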
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
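# Illustrative usage sketch (not part of the original module): the periodogram
# of a pure sinusoid concentrates its power at the sinusoid's frequency bin.
# The frequency (16 cycles) and sample size are arbitrary choices.
def _example_periodogram():
    n = 256
    t = np.arange(n)
    x = np.sin(2 * np.pi * 16 * t / n)  # 16 cycles over the sample
    pg = periodogram(x)
    return int(np.argmax(pg[:n // 2]))  # expected to be 16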
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
    s : array_like
        If isacov is False, then this is the time series. If isacov is True,
        then this is interpreted as autocovariances starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
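# Illustrative usage sketch (not part of the original module): recover AR(2)
# coefficients from a simulated series via the Levinson-Durbin recursion. The
# coefficients (0.5, -0.3), sample size, and seed are arbitrary choices.
def _example_levinson_durbin():
    rng = np.random.RandomState(5)
    e = rng.standard_normal(5000)
    x = np.zeros_like(e)
    for t in range(2, len(e)):
        x[t] = 0.5 * x[t - 1] - 0.3 * x[t - 2] + e[t]
    sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(x, nlags=2,
                                                        isacov=False)
    return arcoefs  # expected to be roughly [0.5, -0.3]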
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
        # Granger Causality test using ssr (chi2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
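# Illustrative usage sketch (not part of the original module): the second
# column is constructed to Granger-cause the first, so the lag-2 p-values
# should be small. The coefficients, noise scale, and seed are arbitrary.
def _example_grangercausalitytests():
    rng = np.random.RandomState(6)
    x2 = rng.standard_normal(500)
    x1 = np.zeros_like(x2)
    for t in range(2, len(x2)):
        x1[t] = 0.5 * x1[t - 1] + 0.4 * x2[t - 2] + 0.1 * rng.standard_normal()
    res = grangercausalitytests(np.column_stack([x1, x2]), maxlag=2,
                                verbose=False)
    return res[2][0]['ssr_ftest']  # (F, p-value, df_denom, df_num) at lag 2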
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8, autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
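# Illustrative usage sketch (not part of the original module): y0 and y1 share
# a common random-walk component, so the Engle-Granger test should reject the
# no-cointegration null (small p-value). The scaling and seed are arbitrary.
def _example_coint():
    rng = np.random.RandomState(7)
    walk = np.cumsum(rng.standard_normal(500))
    y0 = walk + rng.standard_normal(500)
    y1 = 2.0 * walk + rng.standard_normal(500)
    t_stat, pvalue, crit = coint(y0, y1, trend='c', autolag='aic')
    return t_stat, pvalue, crit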
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
    will be provided in the future. In the meantime, consider passing
    {'method' : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
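# Illustrative usage sketch (not part of the original module): select ARMA
# orders by BIC for a simulated AR(1) series. This refits a model per grid
# point, so a small grid is used; the coefficient and seed are arbitrary.
def _example_arma_order_select_ic():
    rng = np.random.RandomState(8)
    e = rng.standard_normal(300)
    y = np.zeros_like(e)
    for t in range(1, len(e)):
        y[t] = 0.7 * y[t - 1] + e[t]
    res = arma_order_select_ic(y, max_ar=2, max_ma=1, ic='bic', trend='c')
    return res.bic_min_order  # expected to be close to (1, 0)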
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
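# Illustrative usage sketch (not part of the original module): KPSS has the
# opposite null to the ADF test, so for stationary white noise the p-value
# should be large (level stationarity is not rejected). The seed is arbitrary.
def _example_kpss():
    rng = np.random.RandomState(9)
    x = rng.standard_normal(500)
    stat, pvalue, lags, crit = kpss(x, regression='c')
    return stat, pvalue, crit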
# MASKED: _sigma_est_kpss function (lines 1295-1304)
|
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
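# Reference note (not part of the original module): the loop above implements
# a Newey-West long-run variance with Bartlett weights,
#   s^2(l) = (1/n) * sum_t e_t**2
#            + (2/n) * sum_{i=1..l} (1 - i/(l+1)) * sum_{t=i+1..n} e_t * e_{t-i},
# which is eq. (10) in Kwiatkowski et al. (1992).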
| 1,295 | 1,304 |
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length that minimizes the information
    criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
    bestlag : int
        The lag length that minimizes the information criterion (for
        't-stat', the highest lag with a significant t-statistic).
results : dict, optional
Dictionary containing all estimation results
Notes
-----
    Does estimation like mod(endog, exog[:, :i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
References
----------
    .. [*] W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
    unbiased : bool
        If True, then the denominators are n-k, otherwise n
    demean : bool
        If True, then subtract the mean of x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic
    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients. Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    Notes
    -----
    This solves yule_walker for each desired lag and currently contains
    duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned. Lag 0 is included and is
        always 1.
    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
elif method in ['ld', 'ldu', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominators are n-k, otherwise n
    Returns
    -------
    ccovf : array
        cross-covariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
    unbiased : boolean
        if True, then the denominators for the autocovariance are n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
    s : array_like
        If isacov is False, then this is the time series. If isacov is True,
        then this is interpreted as autocovariances starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
    The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
        # Granger Causality test using ssr (chi2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8, autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
warnings.warn("y0 and y1 are (almost) perfectly colinear."
"Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
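# --- Illustrative usage sketch (not part of the original statsmodels module) ---
# A minimal, hedged example of calling `coint` above.  The simulated series,
# the seed and the 0.05 threshold are assumptions made purely for
# illustration; wrapping the snippet in a function keeps the module
# importable without side effects.
def _example_coint_usage():
    rng = np.random.RandomState(0)
    # Two I(1) series that share a common stochastic trend, so they should
    # be cointegrated by construction.
    common_trend = np.cumsum(rng.standard_normal(500))
    y0 = common_trend + rng.standard_normal(500)
    y1 = 0.5 * common_trend + rng.standard_normal(500)
    t_stat, pvalue, crit = coint(y0, y1, trend='c')
    # A small p-value rejects the null hypothesis of no cointegration.
    return pvalue < 0.05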
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
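# --- Illustrative usage sketch (not part of the original statsmodels module) ---
# A hedged restatement of the docstring example for `arma_order_select_ic`
# above, wrapped in a function so the module stays import-safe.  The seed,
# sample size and ARMA(2, 2) parameters mirror the docstring and are purely
# illustrative.
def _example_arma_order_select():
    from statsmodels.tsa.arima_process import arma_generate_sample
    np.random.seed(2014)
    ar = np.r_[1, -np.array([.75, -.25])]
    ma = np.r_[1, np.array([.65, .35])]
    y = arma_generate_sample(ar, ma, 250)
    res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    # Each criterion is a DataFrame with the AR order as the row index and
    # the MA order as the column index; the minimising orders are attributes.
    return res.aic_min_order, res.bic_min_order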
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
    # ensure x is 1-dimensional; for 2-d input, len(x) != x.size
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
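# --- Illustrative usage sketch (not part of the original statsmodels module) ---
# A hedged example of calling `kpss` above on a simulated trend-stationary
# series.  The slope, sample size, seed and the 0.05 threshold are
# assumptions for illustration only.
def _example_kpss_usage():
    rng = np.random.RandomState(1)
    x = 0.05 * np.arange(300) + rng.standard_normal(300)
    kpss_stat, p_value, lags, crit = kpss(x, regression='ct')
    # Under the null the series is stationary around a linear trend; a large
    # p-value means that null cannot be rejected.
    return p_value > 0.05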
|
__init__
|
Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
|
import json
import re
import sys
from argparse import ArgumentParser, ArgumentTypeError
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, List, NewType, Optional, Tuple, Union
import dataclasses
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
)
class KGEArgParser(ArgumentParser):
"""
This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
arguments to the parser after initialization and you'll get the output back after parsing as an additional
namespace.
Examples:
>>> from toolbox.KGArgsParser import KGEArgParser
        >>> # you should define these: ModelArguments, DataArguments, TrainingArguments
>>> parser = KGEArgParser((ModelArguments, DataArguments, TrainingArguments))
>>> if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
>>> model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
>>> else:
>>> model_args, data_args, training_args = parser.parse_args_into_dataclasses()
"""
dataclass_types: Iterable[DataClassType]
# MASKED: __init__ function (lines 48-61)
def _add_dataclass_arguments(self, dtype: DataClassType):
for field in dataclasses.fields(dtype):
if not field.init:
continue
field_name = f"--{field.name}"
kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, str):
raise ImportError(
"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
"which can be opted in from Python 3.7 with `from __future__ import annotations`."
"We will add compatibility when Python 3.9 is released."
)
typestring = str(field.type)
for prim_type in (int, float, str):
for collection in (List,):
if (
typestring == f"typing.Union[{collection[prim_type]}, NoneType]"
or typestring == f"typing.Optional[{collection[prim_type]}]"
):
field.type = collection[prim_type]
if (
typestring == f"typing.Union[{prim_type.__name__}, NoneType]"
or typestring == f"typing.Optional[{prim_type.__name__}]"
):
field.type = prim_type
if isinstance(field.type, type) and issubclass(field.type, Enum):
kwargs["choices"] = [x.value for x in field.type]
kwargs["type"] = type(kwargs["choices"][0])
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
else:
kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
if field.default is True:
self.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **kwargs)
# Hack because type=bool in argparse does not behave as we want.
kwargs["type"] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is True if we have no default when of type bool.
default = True if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
kwargs["default"] = default
# This tells argparse we accept 0 or 1 value after --field_name
kwargs["nargs"] = "?"
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif (
hasattr(field.type, "__origin__") and re.search(r"^typing\.List\[(.*)\]$",
str(field.type)) is not None
):
kwargs["nargs"] = "+"
kwargs["type"] = field.type.__args__[0]
assert all(
x == kwargs["type"] for x in field.type.__args__
), f"{field.name} cannot be a List of mixed types"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
elif field.default is dataclasses.MISSING:
kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
elif field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
else:
kwargs["required"] = True
self.add_argument(field_name, **kwargs)
def parse_args_into_dataclasses(
self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None
) -> Tuple[DataClass, ...]:
"""
Parse command-line args into instances of the specified dataclass types.
This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
Args:
args:
List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
return_remaining_strings:
If true, also return a list of remaining argument strings.
look_for_args_file:
If true, will look for a ".args" file with the same base name as the entry point script for this
process, and will append its potential content to the command line args.
args_filename:
                If not None, will use this file instead of the ".args" file specified in the previous argument.
Returns:
Tuple consisting of:
                - the dataclass instances in the same order as they were passed to the initializer.
- if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
after initialization.
- The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
"""
if args_filename or (look_for_args_file and len(sys.argv)):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix(".args")
if args_file.exists():
fargs = args_file.read_text().split()
args = fargs + args if args is not None else fargs + sys.argv[1:]
# in case of duplicate arguments the first one has precedence
# so we append rather than prepend.
namespace, remaining_args = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in vars(namespace).items() if k in keys}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"Some specified arguments are not used by the KGEArgParser: {remaining_args}")
return (*outputs,)
def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types.
"""
data = json.loads(Path(json_file).read_text())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in data.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
def parse_dict(self, args: dict) -> Tuple[DataClass, ...]:
"""
        Alternative helper method that does not use `argparse` at all, instead using a dict to populate the dataclass
        types.
"""
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in args.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
# class KGEArgParser:
# """The class implements the argument parser for the pykg2vec.
#
# KGEArgParser defines all the necessary arguments for the global and local
# configuration of all the modules.
#
# Attributes:
#     general_group (object): It parses the general arguments used by most of the modules.
# general_hyper_group (object): It parses the arguments for the hyper-parameter tuning.
#
# Examples:
# >>> from toolbox.KGArgs import KGEArgParser
# >>> args = KGEArgParser().get_args()
# """
#
# def __init__(self):
# self.parser = ArgumentParser(description='Knowledge Graph Embedding tunable configs.')
#
# ''' argument group for hyperparameters '''
# self.general_hyper_group = self.parser.add_argument_group('Generic Hyperparameters')
# self.general_hyper_group.add_argument('-lmda', dest='lmbda', default=0.1, type=float,
# help='The lmbda for regularization.')
# self.general_hyper_group.add_argument('-b', dest='batch_size', default=128, type=int,
# help='training batch size')
# self.general_hyper_group.add_argument('-mg', dest='margin', default=0.8, type=float,
# help='Margin to take')
# self.general_hyper_group.add_argument('-opt', dest='optimizer', default='adam', type=str,
# help='optimizer to be used in training.')
# self.general_hyper_group.add_argument('-s', dest='sampling', default='uniform', type=str,
# help='strategy to do negative sampling.')
# self.general_hyper_group.add_argument('-ngr', dest='neg_rate', default=1, type=int,
# help='The number of negative samples generated per positive one.')
# self.general_hyper_group.add_argument('-l', dest='epochs', default=100, type=int,
# help='The total number of Epochs')
# self.general_hyper_group.add_argument('-lr', dest='learning_rate', default=0.01, type=float,
# help='learning rate')
# self.general_hyper_group.add_argument('-k', dest='hidden_size', default=50, type=int,
# help='Hidden embedding size.')
# self.general_hyper_group.add_argument('-km', dest='ent_hidden_size', default=50, type=int,
# help="Hidden embedding size for entities.")
# self.general_hyper_group.add_argument('-kr', dest='rel_hidden_size', default=50, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-k2', dest='hidden_size_1', default=10, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-l1', dest='l1_flag', default=True,
# type=lambda x: (str(x).lower() == 'true'),
# help='The flag of using L1 or L2 norm.')
# self.general_hyper_group.add_argument('-al', dest='alpha', default=0.1, type=float,
# help='The alpha used in self-adversarial negative sampling.')
# self.general_hyper_group.add_argument('-fsize', dest='filter_sizes', default=[1, 2, 3], nargs='+', type=int,
# help='Filter sizes to be used in convKB which acts as the widths of the kernals')
# self.general_hyper_group.add_argument('-fnum', dest='num_filters', default=50, type=int,
# help='Filter numbers to be used in convKB and InteractE.')
# self.general_hyper_group.add_argument('-fmd', dest='feature_map_dropout', default=0.2, type=float,
# help='feature map dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-idt', dest='input_dropout', default=0.3, type=float,
# help='input dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-hdt', dest='hidden_dropout', default=0.3, type=float,
# help='hidden dropout value used in ConvE.')
# self.general_hyper_group.add_argument('-hdt1', dest='hidden_dropout1', default=0.4, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-hdt2', dest='hidden_dropout2', default=0.5, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-lbs', dest='label_smoothing', default=0.1, type=float,
# help='The parameter used in label smoothing.')
# self.general_hyper_group.add_argument('-cmax', dest='cmax', default=0.05, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-cmin', dest='cmin', default=5.00, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-fp', dest='feature_permutation', default=1, type=int,
# help='The number of feature permutations for InteractE.')
# self.general_hyper_group.add_argument('-rh', dest='reshape_height', default=20, type=int,
# help='The height of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-rw', dest='reshape_width', default=10, type=int,
# help='The width of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-ks', dest='kernel_size', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-ic', dest='in_channels', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-evd', dest='ent_vec_dim', default=200, type=int, help='.')
# self.general_hyper_group.add_argument('-rvd', dest='rel_vec_dim', default=200, type=int, help='.')
#
# # basic configs
# self.general_group = self.parser.add_argument_group('Generic')
# self.general_group.add_argument('-mn', dest='model_name', default='TransE', type=str, help='Name of model')
# self.general_group.add_argument('-db', dest='debug', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='To use debug mode or not.')
# self.general_group.add_argument('-exp', dest='exp', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='Use Experimental setting extracted from original paper. (use Freebase15k by default)')
# self.general_group.add_argument('-ds', dest='dataset_name', default='Freebase15k', type=str,
# help='The dataset name (choice: fb15k/wn18/wn18_rr/yago/fb15k_237/ks/nations/umls)')
# self.general_group.add_argument('-dsp', dest='dataset_path', default=None, type=str,
# help='The path to custom dataset.')
# self.general_group.add_argument('-ld', dest='load_from_data', default=None, type=str,
# help='The path to the pretrained model.')
# self.general_group.add_argument('-sv', dest='save_model', default=True,
# type=lambda x: (str(x).lower() == 'true'), help='Save the model!')
# self.general_group.add_argument('-tn', dest='test_num', default=1000, type=int,
# help='The total number of test triples')
# self.general_group.add_argument('-ts', dest='test_step', default=10, type=int, help='Test every _ epochs')
# self.general_group.add_argument('-t', dest='tmp', default='../intermediate', type=str,
# help='The folder name to store trained parameters.')
# self.general_group.add_argument('-r', dest='result', default='../results', type=str,
# help='The folder name to save the results.')
# self.general_group.add_argument('-fig', dest='figures', default='../figures', type=str,
# help='The folder name to save the figures.')
# self.general_group.add_argument('-plote', dest='plot_embedding', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-plot', dest='plot_entity_only', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-device', dest='device', default='cpu', type=str, choices=['cpu', 'cuda'],
# help='Device to run pykg2vec (cpu or cuda).')
# self.general_group.add_argument('-npg', dest='num_process_gen', default=2, type=int,
# help='number of processes used in the Generator.')
# self.general_group.add_argument('-hpf', dest='hp_abs_file', default=None, type=str,
# help='The path to the hyperparameter configuration YAML file.')
# self.general_group.add_argument('-ssf', dest='ss_abs_file', default=None, type=str,
# help='The path to the search space configuration YAML file.')
# self.general_group.add_argument('-mt', dest='max_number_trials', default=100, type=int,
# help='The maximum times of trials for bayesian optimizer.')
#
# def get_args(self, args):
# """This function parses the necessary arguments.
#
# This function is called to parse all the necessary arguments.
#
# Returns:
# object: ArgumentParser object.
# """
# return self.parser.parse_args(args)
|
def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
"""
Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
"""
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = dataclass_types
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype)
| 48 | 61 |
import json
import re
import sys
from argparse import ArgumentParser, ArgumentTypeError
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, List, NewType, Optional, Tuple, Union
import dataclasses
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
)
class KGEArgParser(ArgumentParser):
"""
This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
arguments to the parser after initialization and you'll get the output back after parsing as an additional
namespace.
Examples:
>>> from toolbox.KGArgsParser import KGEArgParser
        >>> # you should define these: ModelArguments, DataArguments, TrainingArguments
>>> parser = KGEArgParser((ModelArguments, DataArguments, TrainingArguments))
>>> if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
>>> model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
>>> else:
>>> model_args, data_args, training_args = parser.parse_args_into_dataclasses()
"""
dataclass_types: Iterable[DataClassType]
def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
"""
Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
"""
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = dataclass_types
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype)
def _add_dataclass_arguments(self, dtype: DataClassType):
for field in dataclasses.fields(dtype):
if not field.init:
continue
field_name = f"--{field.name}"
kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, str):
raise ImportError(
"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
"which can be opted in from Python 3.7 with `from __future__ import annotations`."
"We will add compatibility when Python 3.9 is released."
)
typestring = str(field.type)
for prim_type in (int, float, str):
for collection in (List,):
if (
typestring == f"typing.Union[{collection[prim_type]}, NoneType]"
or typestring == f"typing.Optional[{collection[prim_type]}]"
):
field.type = collection[prim_type]
if (
typestring == f"typing.Union[{prim_type.__name__}, NoneType]"
or typestring == f"typing.Optional[{prim_type.__name__}]"
):
field.type = prim_type
if isinstance(field.type, type) and issubclass(field.type, Enum):
kwargs["choices"] = [x.value for x in field.type]
kwargs["type"] = type(kwargs["choices"][0])
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
else:
kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
if field.default is True:
self.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **kwargs)
# Hack because type=bool in argparse does not behave as we want.
kwargs["type"] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is True if we have no default when of type bool.
default = True if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
kwargs["default"] = default
# This tells argparse we accept 0 or 1 value after --field_name
kwargs["nargs"] = "?"
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif (
hasattr(field.type, "__origin__") and re.search(r"^typing\.List\[(.*)\]$",
str(field.type)) is not None
):
kwargs["nargs"] = "+"
kwargs["type"] = field.type.__args__[0]
assert all(
x == kwargs["type"] for x in field.type.__args__
), f"{field.name} cannot be a List of mixed types"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
elif field.default is dataclasses.MISSING:
kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
elif field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
else:
kwargs["required"] = True
self.add_argument(field_name, **kwargs)
def parse_args_into_dataclasses(
self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None
) -> Tuple[DataClass, ...]:
"""
Parse command-line args into instances of the specified dataclass types.
This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
Args:
args:
List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
return_remaining_strings:
If true, also return a list of remaining argument strings.
look_for_args_file:
If true, will look for a ".args" file with the same base name as the entry point script for this
process, and will append its potential content to the command line args.
args_filename:
                If not None, will use this file instead of the ".args" file specified in the previous argument.
Returns:
Tuple consisting of:
                - the dataclass instances in the same order as they were passed to the initializer.
- if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
after initialization.
- The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
"""
if args_filename or (look_for_args_file and len(sys.argv)):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix(".args")
if args_file.exists():
fargs = args_file.read_text().split()
args = fargs + args if args is not None else fargs + sys.argv[1:]
# in case of duplicate arguments the first one has precedence
# so we append rather than prepend.
namespace, remaining_args = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in vars(namespace).items() if k in keys}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"Some specified arguments are not used by the KGEArgParser: {remaining_args}")
return (*outputs,)
def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types.
"""
data = json.loads(Path(json_file).read_text())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in data.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
def parse_dict(self, args: dict) -> Tuple[DataClass, ...]:
"""
        Alternative helper method that does not use `argparse` at all, instead using a dict to populate the dataclass
        types.
"""
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in args.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
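# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of driving KGEArgParser with a simple dataclass.  The
# TrainArgs dataclass and the argument values are made up for illustration;
# they are not part of the toolbox itself.
def _example_kgeargparser_usage():
    @dataclasses.dataclass
    class TrainArgs:
        learning_rate: float = 0.01
        epochs: int = 100
        use_gpu: bool = False

    parser = KGEArgParser(TrainArgs)
    (train_args,) = parser.parse_args_into_dataclasses(
        args=["--learning_rate", "0.001", "--use_gpu", "true"],
        look_for_args_file=False,
    )
    # learning_rate is overridden, epochs keeps its default, and use_gpu is
    # parsed through string_to_bool.
    return train_args.learning_rate, train_args.epochs, train_args.use_gpu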
# class KGEArgParser:
# """The class implements the argument parser for the pykg2vec.
#
# KGEArgParser defines all the necessary arguments for the global and local
# configuration of all the modules.
#
# Attributes:
#     general_group (object): It parses the general arguments used by most of the modules.
# general_hyper_group (object): It parses the arguments for the hyper-parameter tuning.
#
# Examples:
# >>> from toolbox.KGArgs import KGEArgParser
# >>> args = KGEArgParser().get_args()
# """
#
# def __init__(self):
# self.parser = ArgumentParser(description='Knowledge Graph Embedding tunable configs.')
#
# ''' argument group for hyperparameters '''
# self.general_hyper_group = self.parser.add_argument_group('Generic Hyperparameters')
# self.general_hyper_group.add_argument('-lmda', dest='lmbda', default=0.1, type=float,
# help='The lmbda for regularization.')
# self.general_hyper_group.add_argument('-b', dest='batch_size', default=128, type=int,
# help='training batch size')
# self.general_hyper_group.add_argument('-mg', dest='margin', default=0.8, type=float,
# help='Margin to take')
# self.general_hyper_group.add_argument('-opt', dest='optimizer', default='adam', type=str,
# help='optimizer to be used in training.')
# self.general_hyper_group.add_argument('-s', dest='sampling', default='uniform', type=str,
# help='strategy to do negative sampling.')
# self.general_hyper_group.add_argument('-ngr', dest='neg_rate', default=1, type=int,
# help='The number of negative samples generated per positive one.')
# self.general_hyper_group.add_argument('-l', dest='epochs', default=100, type=int,
# help='The total number of Epochs')
# self.general_hyper_group.add_argument('-lr', dest='learning_rate', default=0.01, type=float,
# help='learning rate')
# self.general_hyper_group.add_argument('-k', dest='hidden_size', default=50, type=int,
# help='Hidden embedding size.')
# self.general_hyper_group.add_argument('-km', dest='ent_hidden_size', default=50, type=int,
# help="Hidden embedding size for entities.")
# self.general_hyper_group.add_argument('-kr', dest='rel_hidden_size', default=50, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-k2', dest='hidden_size_1', default=10, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-l1', dest='l1_flag', default=True,
# type=lambda x: (str(x).lower() == 'true'),
# help='The flag of using L1 or L2 norm.')
# self.general_hyper_group.add_argument('-al', dest='alpha', default=0.1, type=float,
# help='The alpha used in self-adversarial negative sampling.')
# self.general_hyper_group.add_argument('-fsize', dest='filter_sizes', default=[1, 2, 3], nargs='+', type=int,
# help='Filter sizes to be used in convKB which acts as the widths of the kernals')
# self.general_hyper_group.add_argument('-fnum', dest='num_filters', default=50, type=int,
# help='Filter numbers to be used in convKB and InteractE.')
# self.general_hyper_group.add_argument('-fmd', dest='feature_map_dropout', default=0.2, type=float,
# help='feature map dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-idt', dest='input_dropout', default=0.3, type=float,
# help='input dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-hdt', dest='hidden_dropout', default=0.3, type=float,
# help='hidden dropout value used in ConvE.')
# self.general_hyper_group.add_argument('-hdt1', dest='hidden_dropout1', default=0.4, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-hdt2', dest='hidden_dropout2', default=0.5, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-lbs', dest='label_smoothing', default=0.1, type=float,
# help='The parameter used in label smoothing.')
# self.general_hyper_group.add_argument('-cmax', dest='cmax', default=0.05, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-cmin', dest='cmin', default=5.00, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-fp', dest='feature_permutation', default=1, type=int,
# help='The number of feature permutations for InteractE.')
# self.general_hyper_group.add_argument('-rh', dest='reshape_height', default=20, type=int,
# help='The height of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-rw', dest='reshape_width', default=10, type=int,
# help='The width of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-ks', dest='kernel_size', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-ic', dest='in_channels', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-evd', dest='ent_vec_dim', default=200, type=int, help='.')
# self.general_hyper_group.add_argument('-rvd', dest='rel_vec_dim', default=200, type=int, help='.')
#
# # basic configs
# self.general_group = self.parser.add_argument_group('Generic')
# self.general_group.add_argument('-mn', dest='model_name', default='TransE', type=str, help='Name of model')
# self.general_group.add_argument('-db', dest='debug', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='To use debug mode or not.')
# self.general_group.add_argument('-exp', dest='exp', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='Use Experimental setting extracted from original paper. (use Freebase15k by default)')
# self.general_group.add_argument('-ds', dest='dataset_name', default='Freebase15k', type=str,
# help='The dataset name (choice: fb15k/wn18/wn18_rr/yago/fb15k_237/ks/nations/umls)')
# self.general_group.add_argument('-dsp', dest='dataset_path', default=None, type=str,
# help='The path to custom dataset.')
# self.general_group.add_argument('-ld', dest='load_from_data', default=None, type=str,
# help='The path to the pretrained model.')
# self.general_group.add_argument('-sv', dest='save_model', default=True,
# type=lambda x: (str(x).lower() == 'true'), help='Save the model!')
# self.general_group.add_argument('-tn', dest='test_num', default=1000, type=int,
# help='The total number of test triples')
# self.general_group.add_argument('-ts', dest='test_step', default=10, type=int, help='Test every _ epochs')
# self.general_group.add_argument('-t', dest='tmp', default='../intermediate', type=str,
# help='The folder name to store trained parameters.')
# self.general_group.add_argument('-r', dest='result', default='../results', type=str,
# help='The folder name to save the results.')
# self.general_group.add_argument('-fig', dest='figures', default='../figures', type=str,
# help='The folder name to save the figures.')
# self.general_group.add_argument('-plote', dest='plot_embedding', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-plot', dest='plot_entity_only', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-device', dest='device', default='cpu', type=str, choices=['cpu', 'cuda'],
# help='Device to run pykg2vec (cpu or cuda).')
# self.general_group.add_argument('-npg', dest='num_process_gen', default=2, type=int,
# help='number of processes used in the Generator.')
# self.general_group.add_argument('-hpf', dest='hp_abs_file', default=None, type=str,
# help='The path to the hyperparameter configuration YAML file.')
# self.general_group.add_argument('-ssf', dest='ss_abs_file', default=None, type=str,
# help='The path to the search space configuration YAML file.')
# self.general_group.add_argument('-mt', dest='max_number_trials', default=100, type=int,
# help='The maximum times of trials for bayesian optimizer.')
#
# def get_args(self, args):
# """This function parses the necessary arguments.
#
# This function is called to parse all the necessary arguments.
#
# Returns:
# object: ArgumentParser object.
# """
# return self.parser.parse_args(args)
|
__init__
|
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/ssh_host_key.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import hashlib
import logging
import os
from king_phisher import its
from king_phisher import errors
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
import paramiko
import paramiko.hostkeys
__all__ = ('HostKeyAcceptDialog', 'HostKeyWarnDialog')
class BaseHostKeyDialog(gui_utilities.GladeGObject):
"""
A base class for dialogs which show information about SSH host keys. It is
assumed that the widgets defined in :py:attr:`.dependencies` are present
including one button to accept the host key, and one to reject. The class's
default response can be set using :py:attr:`.default_response`.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_accept',
'button_reject',
'textview_key_details'
),
top_level=(
'StockApplyImage',
'StockStopImage'
)
)
top_gobject = 'dialog'
default_response = None
"""The response that should be selected as the default for the dialog."""
# MASKED: __init__ function (lines 72-88)
@property
def key_details(self):
key_type = self.key.get_name().lower()
details = "Host: {0} ({1})\n".format(self.hostname, key_type)
if key_type.startswith('ssh-'):
key_type = key_type[4:]
key_type = key_type.split('-', 1)[0].upper()
details += "{0} key fingerprint is SHA256:{1}.\n".format(key_type, base64.b64encode(hashlib.new('sha256', self.key.asbytes()).digest()).decode('utf-8'))
details += "{0} key fingerprint is MD5:{1}.\n".format(key_type, binascii.b2a_hex(hashlib.new('md5', self.key.asbytes()).digest()).decode('utf-8'))
return details
def interact(self):
self.dialog.show_all()
response = self.dialog.run()
self.dialog.destroy()
return response
class HostKeyAcceptDialog(BaseHostKeyDialog):
"""
A dialog that shows an SSH host key for a host that has not previously had
one associated with it.
"""
	default_response = Gtk.ResponseType.ACCEPT
class HostKeyWarnDialog(BaseHostKeyDialog):
"""
A dialog that warns about an SSH host key that does not match the one that
was previously stored for the host.
"""
	default_response = Gtk.ResponseType.REJECT
def signal_checkbutton_toggled(self, button):
self.gobjects['button_accept'].set_sensitive(button.get_property('active'))
class MissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""
A host key policy for use with paramiko that will validate SSH host keys
correctly. If a key is new, the user will be prompted with
:py:class:`.HostKeyAcceptDialog` dialog to accept it or if the host key does
not match the user will be warned with :py:class:`.HostKeyWarnDialog`. The
host keys accepted through this policy are stored in an OpenSSH compatible
"known_hosts" file using paramiko.
"""
def __init__(self, application):
"""
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
"""
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
super(MissingHostKeyPolicy, self).__init__()
def missing_host_key(self, client, hostname, key):
host_key_fingerprint = 'sha256:' + base64.b64encode(hashlib.new('sha256', key.asbytes()).digest()).decode('utf-8')
host_keys = paramiko.hostkeys.HostKeys()
host_keys_modified = False
known_hosts_file = self.application.config.get('ssh_known_hosts_file', os.path.join(GLib.get_user_config_dir(), 'king-phisher', 'known_hosts'))
if os.access(known_hosts_file, os.R_OK):
host_keys.load(known_hosts_file)
if host_keys.lookup(hostname):
if host_keys.check(hostname, key):
self.logger.debug("accepting known ssh host key {0} {1} {2}".format(hostname, key.get_name(), host_key_fingerprint))
return
self.logger.warning("ssh host key does not match known value for {0}".format(hostname))
dialog = HostKeyWarnDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('bad ssh host key for ' + hostname)
else:
dialog = HostKeyAcceptDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('unknown ssh host key not accepted by the user for ' + hostname)
host_keys.add(hostname, key.get_name(), key)
host_keys_modified = True
if host_keys_modified:
try:
host_keys.save(known_hosts_file)
os.chmod(known_hosts_file, 0o600)
except IOError if its.py_v2 else PermissionError:
self.logger.warning('failed to save the known_hosts file and set its permissions')
|
def __init__(self, application, hostname, key):
"""
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
"""
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if self.default_response is not None:
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default()
| 72 | 88 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/ssh_host_key.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import hashlib
import logging
import os
from king_phisher import its
from king_phisher import errors
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
import paramiko
import paramiko.hostkeys
__all__ = ('HostKeyAcceptDialog', 'HostKeyWarnDialog')
class BaseHostKeyDialog(gui_utilities.GladeGObject):
"""
A base class for dialogs which show information about SSH host keys. It is
assumed that the widgets defined in :py:attr:`.dependencies` are present
including one button to accept the host key, and one to reject. The class's
default response can be set using :py:attr:`.default_response`.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_accept',
'button_reject',
'textview_key_details'
),
top_level=(
'StockApplyImage',
'StockStopImage'
)
)
top_gobject = 'dialog'
default_response = None
"""The response that should be selected as the default for the dialog."""
def __init__(self, application, hostname, key):
"""
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
"""
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if self.default_response is not None:
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default()
@property
def key_details(self):
key_type = self.key.get_name().lower()
details = "Host: {0} ({1})\n".format(self.hostname, key_type)
if key_type.startswith('ssh-'):
key_type = key_type[4:]
key_type = key_type.split('-', 1)[0].upper()
details += "{0} key fingerprint is SHA256:{1}.\n".format(key_type, base64.b64encode(hashlib.new('sha256', self.key.asbytes()).digest()).decode('utf-8'))
details += "{0} key fingerprint is MD5:{1}.\n".format(key_type, binascii.b2a_hex(hashlib.new('md5', self.key.asbytes()).digest()).decode('utf-8'))
return details
def interact(self):
self.dialog.show_all()
response = self.dialog.run()
self.dialog.destroy()
return response
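# --- Illustrative sketch (not part of the original module) ---
# A hedged, standalone illustration of the fingerprint formatting used by
# `key_details` above: the SHA256 digest is base64 encoded while the MD5
# digest is hex encoded.  The key bytes below are a made-up stand-in for
# `PKey.asbytes()`.
def _example_fingerprints(key_bytes=b'example-key-material'):
    sha256_fp = base64.b64encode(hashlib.new('sha256', key_bytes).digest()).decode('utf-8')
    md5_fp = binascii.b2a_hex(hashlib.new('md5', key_bytes).digest()).decode('utf-8')
    return sha256_fp, md5_fp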
class HostKeyAcceptDialog(BaseHostKeyDialog):
"""
A dialog that shows an SSH host key for a host that has not previously had
one associated with it.
"""
	default_response = Gtk.ResponseType.ACCEPT
class HostKeyWarnDialog(BaseHostKeyDialog):
"""
A dialog that warns about an SSH host key that does not match the one that
was previously stored for the host.
"""
	default_response = Gtk.ResponseType.REJECT
def signal_checkbutton_toggled(self, button):
self.gobjects['button_accept'].set_sensitive(button.get_property('active'))
class MissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""
A host key policy for use with paramiko that will validate SSH host keys
correctly. If a key is new, the user will be prompted with
:py:class:`.HostKeyAcceptDialog` dialog to accept it or if the host key does
not match the user will be warned with :py:class:`.HostKeyWarnDialog`. The
host keys accepted through this policy are stored in an OpenSSH compatible
"known_hosts" file using paramiko.
"""
def __init__(self, application):
"""
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
"""
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
super(MissingHostKeyPolicy, self).__init__()
def missing_host_key(self, client, hostname, key):
host_key_fingerprint = 'sha256:' + base64.b64encode(hashlib.new('sha256', key.asbytes()).digest()).decode('utf-8')
host_keys = paramiko.hostkeys.HostKeys()
host_keys_modified = False
known_hosts_file = self.application.config.get('ssh_known_hosts_file', os.path.join(GLib.get_user_config_dir(), 'king-phisher', 'known_hosts'))
if os.access(known_hosts_file, os.R_OK):
host_keys.load(known_hosts_file)
if host_keys.lookup(hostname):
if host_keys.check(hostname, key):
self.logger.debug("accepting known ssh host key {0} {1} {2}".format(hostname, key.get_name(), host_key_fingerprint))
return
self.logger.warning("ssh host key does not match known value for {0}".format(hostname))
dialog = HostKeyWarnDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('bad ssh host key for ' + hostname)
else:
dialog = HostKeyAcceptDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('unknown ssh host key not accepted by the user for ' + hostname)
host_keys.add(hostname, key.get_name(), key)
host_keys_modified = True
if host_keys_modified:
try:
host_keys.save(known_hosts_file)
os.chmod(known_hosts_file, 0o600)
except IOError if its.py_v2 else PermissionError:
self.logger.warning('failed to save the known_hosts file and set its permissions')
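# --- Illustrative sketch (not part of the original module) ---
# A hedged example of wiring MissingHostKeyPolicy into a paramiko SSHClient.
# The `application` object and the connection details are assumptions for
# illustration; in King Phisher the client application supplies them.
def _example_missing_host_key_policy(application, hostname, username, password):
    client = paramiko.SSHClient()
    # Unknown or mismatched host keys are routed through the accept/warn
    # dialogs defined above instead of being rejected outright.
    client.set_missing_host_key_policy(MissingHostKeyPolicy(application))
    client.connect(hostname, username=username, password=password)
    return client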
|
__init__
|
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/ssh_host_key.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import hashlib
import logging
import os
from king_phisher import its
from king_phisher import errors
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
import paramiko
import paramiko.hostkeys
__all__ = ('HostKeyAcceptDialog', 'HostKeyWarnDialog')
class BaseHostKeyDialog(gui_utilities.GladeGObject):
"""
A base class for dialogs which show information about SSH host keys. It is
assumed that the widgets defined in :py:attr:`.dependencies` are present
including one button to accept the host key, and one to reject. The class's
default response can be set using :py:attr:`.default_response`.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_accept',
'button_reject',
'textview_key_details'
),
top_level=(
'StockApplyImage',
'StockStopImage'
)
)
top_gobject = 'dialog'
default_response = None
"""The response that should be selected as the default for the dialog."""
def __init__(self, application, hostname, key):
"""
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
"""
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if self.default_response is not None:
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default()
@property
def key_details(self):
key_type = self.key.get_name().lower()
details = "Host: {0} ({1})\n".format(self.hostname, key_type)
if key_type.startswith('ssh-'):
key_type = key_type[4:]
key_type = key_type.split('-', 1)[0].upper()
details += "{0} key fingerprint is SHA256:{1}.\n".format(key_type, base64.b64encode(hashlib.new('sha256', self.key.asbytes()).digest()).decode('utf-8'))
details += "{0} key fingerprint is MD5:{1}.\n".format(key_type, binascii.b2a_hex(hashlib.new('md5', self.key.asbytes()).digest()).decode('utf-8'))
return details
def interact(self):
self.dialog.show_all()
response = self.dialog.run()
self.dialog.destroy()
return response
class HostKeyAcceptDialog(BaseHostKeyDialog):
"""
A dialog that shows an SSH host key for a host that has not previously had
one associated with it.
"""
default_response = Gtk.ResponseType.ACCEPT
class HostKeyWarnDialog(BaseHostKeyDialog):
"""
A dialog that warns about an SSH host key that does not match the one that
was previously stored for the host.
"""
default_response = Gtk.ResponseType.REJECT
def signal_checkbutton_toggled(self, button):
self.gobjects['button_accept'].set_sensitive(button.get_property('active'))
class MissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""
A host key policy for use with paramiko that will validate SSH host keys
correctly. If a key is new, the user is prompted with a
:py:class:`.HostKeyAcceptDialog` to accept it; if the host key does not match
the stored value, the user is warned with a :py:class:`.HostKeyWarnDialog`. The
host keys accepted through this policy are stored in an OpenSSH compatible
"known_hosts" file using paramiko.
"""
# MASKED: __init__ function (lines 132-139)
def missing_host_key(self, client, hostname, key):
host_key_fingerprint = 'sha256:' + base64.b64encode(hashlib.new('sha256', key.asbytes()).digest()).decode('utf-8')
host_keys = paramiko.hostkeys.HostKeys()
host_keys_modified = False
known_hosts_file = self.application.config.get('ssh_known_hosts_file', os.path.join(GLib.get_user_config_dir(), 'king-phisher', 'known_hosts'))
if os.access(known_hosts_file, os.R_OK):
host_keys.load(known_hosts_file)
if host_keys.lookup(hostname):
if host_keys.check(hostname, key):
self.logger.debug("accepting known ssh host key {0} {1} {2}".format(hostname, key.get_name(), host_key_fingerprint))
return
self.logger.warning("ssh host key does not match known value for {0}".format(hostname))
dialog = HostKeyWarnDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('bad ssh host key for ' + hostname)
else:
dialog = HostKeyAcceptDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('unknown ssh host key not accepted by the user for ' + hostname)
host_keys.add(hostname, key.get_name(), key)
host_keys_modified = True
if host_keys_modified:
try:
host_keys.save(known_hosts_file)
os.chmod(known_hosts_file, 0o600)
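# its.py_v2 picks the exception class at runtime: IOError (Python 2) or PermissionError (Python 3)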
except IOError if its.py_v2 else PermissionError:
self.logger.warning('failed to save the known_hosts file and set its permissions')
|
def __init__(self, application):
"""
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
"""
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
super(MissingHostKeyPolicy, self).__init__()
| 132 | 139 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/ssh_host_key.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import hashlib
import logging
import os
from king_phisher import its
from king_phisher import errors
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
import paramiko
import paramiko.hostkeys
__all__ = ('HostKeyAcceptDialog', 'HostKeyWarnDialog')
class BaseHostKeyDialog(gui_utilities.GladeGObject):
"""
A base class for dialogs which show information about SSH host keys. It is
assumed that the widgets defined in :py:attr:`.dependencies` are present,
including one button to accept the host key and one to reject it. The class's
default response can be set using :py:attr:`.default_response`.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_accept',
'button_reject',
'textview_key_details'
),
top_level=(
'StockApplyImage',
'StockStopImage'
)
)
top_gobject = 'dialog'
default_response = None
"""The response that should be selected as the default for the dialog."""
def __init__(self, application, hostname, key):
"""
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
"""
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if self.default_response is not None:
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default()
@property
def key_details(self):
key_type = self.key.get_name().lower()
details = "Host: {0} ({1})\n".format(self.hostname, key_type)
if key_type.startswith('ssh-'):
key_type = key_type[4:]
key_type = key_type.split('-', 1)[0].upper()
details += "{0} key fingerprint is SHA256:{1}.\n".format(key_type, base64.b64encode(hashlib.new('sha256', self.key.asbytes()).digest()).decode('utf-8'))
details += "{0} key fingerprint is MD5:{1}.\n".format(key_type, binascii.b2a_hex(hashlib.new('md5', self.key.asbytes()).digest()).decode('utf-8'))
return details
def interact(self):
self.dialog.show_all()
response = self.dialog.run()
self.dialog.destroy()
return response
class HostKeyAcceptDialog(BaseHostKeyDialog):
"""
A dialog that shows an SSH host key for a host that has not previously had
one associated with it.
"""
default_response = Gtk.ResponseType.ACCEPT
class HostKeyWarnDialog(BaseHostKeyDialog):
"""
A dialog that warns about an SSH host key that does not match the one that
was previously stored for the host.
"""
default_response = Gtk.ResponseType.REJECT
def signal_checkbutton_toggled(self, button):
self.gobjects['button_accept'].set_sensitive(button.get_property('active'))
class MissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""
A host key policy for use with paramiko that will validate SSH host keys
correctly. If a key is new, the user is prompted with a
:py:class:`.HostKeyAcceptDialog` to accept it; if the host key does not match
the stored value, the user is warned with a :py:class:`.HostKeyWarnDialog`. The
host keys accepted through this policy are stored in an OpenSSH compatible
"known_hosts" file using paramiko.
"""
def __init__(self, application):
"""
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
"""
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
super(MissingHostKeyPolicy, self).__init__()
def missing_host_key(self, client, hostname, key):
host_key_fingerprint = 'sha256:' + base64.b64encode(hashlib.new('sha256', key.asbytes()).digest()).decode('utf-8')
host_keys = paramiko.hostkeys.HostKeys()
host_keys_modified = False
known_hosts_file = self.application.config.get('ssh_known_hosts_file', os.path.join(GLib.get_user_config_dir(), 'king-phisher', 'known_hosts'))
if os.access(known_hosts_file, os.R_OK):
host_keys.load(known_hosts_file)
if host_keys.lookup(hostname):
if host_keys.check(hostname, key):
self.logger.debug("accepting known ssh host key {0} {1} {2}".format(hostname, key.get_name(), host_key_fingerprint))
return
self.logger.warning("ssh host key does not match known value for {0}".format(hostname))
dialog = HostKeyWarnDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('bad ssh host key for ' + hostname)
else:
dialog = HostKeyAcceptDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('unknown ssh host key not accepted by the user for ' + hostname)
host_keys.add(hostname, key.get_name(), key)
host_keys_modified = True
if host_keys_modified:
try:
host_keys.save(known_hosts_file)
os.chmod(known_hosts_file, 0o600)
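# its.py_v2 picks the exception class at runtime: IOError (Python 2) or PermissionError (Python 3)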
except IOError if its.py_v2 else PermissionError:
self.logger.warning('failed to save the known_hosts file and set its permissions')
|
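For reference, a minimal sketch of how a policy such as MissingHostKeyPolicy above is typically attached to a paramiko connection. The application object, server address and credentials are hypothetical placeholders; only the paramiko calls themselves are established API, and the fingerprint is computed the same way BaseHostKeyDialog.key_details does.

# hypothetical helper; 'application' stands in for a KingPhisherClientApplication-like object
import base64
import hashlib
import paramiko

def open_verified_client(application, server, username, password):
    client = paramiko.SSHClient()
    # route unknown or mismatched host keys through the GTK dialogs defined above
    client.set_missing_host_key_policy(MissingHostKeyPolicy(application))
    client.connect(server, username=username, password=password)
    # fingerprint the negotiated server key, mirroring key_details
    key = client.get_transport().get_remote_server_key()
    digest = hashlib.new('sha256', key.asbytes()).digest()
    fingerprint = 'SHA256:' + base64.b64encode(digest).decode('utf-8')
    return client, fingerprint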